Ray-Cube Intersection in C++

I am writing a ray-tracing program in C++ to get better at the language. However, I'm having some trouble implementing my ray-cube intersection functionality.
Here is my class:

class Cube {
public:
    Cube() {}
    ~Cube() {}

    float Intersection(Ray ray) {
        // Intersection Code Here
    }

public:
    glm::vec3 Position;
    glm::vec3 Size;
    int MaterialIndex = 0;
};
I can't find any good references that fit my needs (using vec3s for size and position). I need to return a float that is the distance to the cube, or -1 if the ray does not hit.
For reference, here is my Sphere class:

class Sphere {
public:
    Sphere() {}
    ~Sphere() {}

    float Intersection(Ray ray) {
        glm::vec3 origin = ray.Origin - Position;
        float a = glm::dot(ray.Direction, ray.Direction);
        float b = 2.0f * glm::dot(origin, ray.Direction);
        float c = glm::dot(origin, origin) - Radius * Radius;
        // Quadratic formula discriminant:
        // b^2 - 4ac
        float discriminant = b * b - 4.0f * a * c;
        if (discriminant < 0.0f)
            return -1.0f; // Ray misses the sphere
        // Quadratic formula:
        // (-b +- sqrt(discriminant)) / 2a
        // float t0 = (-b + glm::sqrt(discriminant)) / (2.0f * a); // Second hit distance (currently unused)
        float closestT = (-b - glm::sqrt(discriminant)) / (2.0f * a);
        return closestT;
    }

public:
    float Radius = 0.5f;
    glm::vec3 Position;
    int MaterialIndex = 0;
};
Can anyone help me?
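For anyone looking for a starting point: below is a minimal, untested sketch of the standard slab method, written against the class above. It assumes Position is the cube's center and Size its full edge lengths; the ray direction does not need to be normalized.

float Intersection(Ray ray) {
    glm::vec3 halfSize = Size * 0.5f;
    glm::vec3 boxMin = Position - halfSize;
    glm::vec3 boxMax = Position + halfSize;
    // Per-axis entry/exit distances; a zero direction component yields +/-inf,
    // which the min/max below handle except in degenerate on-plane cases.
    glm::vec3 invDir = 1.0f / ray.Direction;
    glm::vec3 tLo = (boxMin - ray.Origin) * invDir;
    glm::vec3 tHi = (boxMax - ray.Origin) * invDir;
    glm::vec3 tMin = glm::min(tLo, tHi);
    glm::vec3 tMax = glm::max(tLo, tHi);
    float tNear = glm::max(tMin.x, glm::max(tMin.y, tMin.z));
    float tFar = glm::min(tMax.x, glm::min(tMax.y, tMax.z));
    if (tNear > tFar || tFar < 0.0f)
        return -1.0f; // Miss, or box entirely behind the ray
    return tNear >= 0.0f ? tNear : tFar; // tFar when the origin is inside the box
}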

Related

(DX12) Trying to implement volumetric scattering for multiple spot lights, but it's not going well

(This image shows what I want to implement.)
I am attempting post-processing using a compute shader to implement light shafts for multiple spot lights in the DX12 framework.
The first thing I tried was the method at the following link: https://gitlab.com/tomasoh/100_procent_more_volume/-/blob/master/shaders/volumetric.frag
It's a complicated, hard-to-understand shader, but it's built on the premise of multiple lights, so it serves as an example for this purpose.
However, since my game allows up to 32 light sources, and computing visibility by building a shadow map for every light would cause an excessive frame drop, I implemented visibility as a constant 1.0 and did not get the desired result. (Of course, that is still a result.)
Here is how I did it:
#include "lighting.hlsl"
Texture2D<float4> inputTexture : register(t0);
Texture2D<float> depthTexture : register(t1);
RWTexture2D<float4> outputTexture : register(u0);
#define PI 3.141592653589793238f
cbuffer VolumetricCB : register(b1)
{
float absorptionTau : packoffset(c0);
float3 absorptionColor : packoffset(c0.y);
int scatteringSamples : packoffset(c1.x);
float scatteringTau : packoffset(c1.y);
float scatteringZFar : packoffset(c1.z);
float3 scatteringColor : packoffset(c2);
matrix gInvProj : packoffset(c3);
matrix gInvView : packoffset(c7);
float3 gCameraPos : packoffset(c11);
Light gLights[NUM_LIGHTS] : packoffset(c12);
}
float random(float2 co)
{
return frac(sin(dot(co.xy, float2(12.9898, 78.233))) * 43758.5453123);
}
float3 PixelWorldPos(float depthValue, int2 pixel)
{
uint width, height;
inputTexture.GetDimensions(width, height);
float2 fPixel = float2(pixel.x, pixel.y);
float x = (fPixel.x / width * 2) - 1;
float y = (fPixel.y / height * (-2)) + 1;
float z = depthValue;
float4 ndcCoords = float4(x, y, z, 1.0f);
float4 p = mul(ndcCoords, gInvProj);
p /= p.w;
float4 worldCoords = mul(p, gInvView);
return worldCoords.xyz;
}
float3 absorptionTransmittance(float dist)
{
return absorptionColor * exp(-dist * (absorptionTau + scatteringTau));
}
float phaseFunction(float3 inDir, float3 outDir)
{
float cosAngle = dot(inDir, outDir) / (length(inDir) * length(outDir));
float x = (1.0 + cosAngle) / 2.0;
float x2 = x * x;
float x4 = x2 * x2;
float x8 = x4 * x4;
float x16 = x8 * x8;
float x32 = x16 * x16;
float nom = 0.5 + 16.5 * x32;
float factor = 1.0 / (4.0 * PI);
return nom * factor;
}
float3 volumetricScattering(float3 worldPosition, Light light)
{
float3 result = float3(0.0, 0.0, 0.0);
float3 camToFrag = worldPosition - gCameraPos;
if (length(camToFrag) > scatteringZFar)
{
camToFrag = normalize(camToFrag) * scatteringZFar;
}
float3 deltaStep = camToFrag / (scatteringSamples + 1);
float3 fragToCamNorm = normalize(gCameraPos - worldPosition);
float3 x = gCameraPos;
float rand = random(worldPosition.xy + worldPosition.z);
x += (deltaStep * rand);
for (int i = 0; i < scatteringSamples; ++i)
{
float visibility = 1.0;
float3 lightToX = x - light.Position;
float lightDist = length(lightToX);
float omega = 4 * PI * lightDist * lightDist;
float3 Lin = absorptionTransmittance(lightDist) * visibility * light.Diffuse * light.SpotPower / omega;
float3 Li = Lin * scatteringTau * scatteringColor * phaseFunction(normalize(lightToX), fragToCamNorm);
result += Li * absorptionTransmittance(distance(x, gCameraPos)) * length(deltaStep);
x += deltaStep;
}
return result;
}
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
int2 pixel = int2(dispatchID.x, dispatchID.y);
float4 volumetricColor = float4(0.0, 0.0, 0.0, 1.0);
float depthValue = depthTexture[pixel].r;
float3 worldPosition = PixelWorldPos(depthValue, pixel);
float fragCamDist = distance(worldPosition, gCameraPos);
for (int i = 0; i < NUM_LIGHTS; ++i)
{
if (gLights[i].Type == SPOT_LIGHT && gLights[i].FalloffEnd > length(gLights[i].Position - worldPosition))
volumetricColor += float4(volumetricScattering(worldPosition, gLights[i]), 0.0);
}
outputTexture[pixel] = volumetricColor + inputTexture[pixel];
}
(AbsorptionTau = -0.061f, ScatteringTau = 0.059f)
All this code for that tiny spot...
The second method is the one shown in Chapter 13 of GPU Gems 3.
It draws only the light sources to a separate render target, processes that render target with a post-processing shader to create the light scattering, and then merges the result with the back buffer. (At least, that's how I understand it.)
However, this method was designed for a single, very strong light; I modified the code as below to account for that, but it didn't work well.
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
    uint2 pixel = dispatchID.xy;
    uint width, height;
    inputTexture.GetDimensions(width, height);
    float4 result = inputTexture[pixel];
    for (int i = 0; i < NUM_LIGHTS; ++i)
    {
        if (gLights[i].Type == SPOT_LIGHT)
        {
            // Cast to float before dividing to avoid uint integer division
            float2 texCoord = float2(pixel) / float2(width, height);
            float2 deltaTexCoord = (texCoord - mul(mul(float4(gLights[i].Position, 1.0f), gView), gProj).xy);
            deltaTexCoord *= 1.0f / NUM_SAMPLES * Density;
            float3 color = inputTexture[pixel].rgb;
            float illuminationDecay = 1.0f;
            for (int j = 0; j < NUM_SAMPLES; j++)
            {
                texCoord -= deltaTexCoord;
                uint2 modifiedPixel = uint2(texCoord.x * width, texCoord.y * height);
                float3 sample = inputTexture[modifiedPixel].rgb;
                sample *= illuminationDecay * Weight;
                color += sample;
                illuminationDecay *= Decay;
            }
            result += float4(color * Exposure, 1);
        }
    }
    outputTexture[pixel] = result;
}
This just blurs the light-source map, which is surely not what I wanted.
Is there an example similar to the implementation I want, or a simpler way to do this? I've spent a week on this issue but haven't achieved much.
Edit:
I did it! But there's an error in the direction of the light volumes.
[numthreads(32, 32, 1)]
void CS(uint3 dispatchID : SV_DispatchThreadID)
{
    float4 result = { 0.0f, 0.0f, 0.0f, 0.0f };
    uint2 pixel = dispatchID.xy;
    uint width, height;
    inputTexture.GetDimensions(width, height);
    float2 texCoord = (float2(pixel) + 0.5f) / float2(width, height);
    float depth = depthTexture[pixel].r;
    float3 screenPos = GetPositionVS(texCoord, depth);
    float3 rayEnd = float3(0.0f, 0.0f, 0.0f);
    const uint sampleCount = 16;
    const float stepSize = length(screenPos - rayEnd) / sampleCount;

    // Perform ray marching to integrate light volume along view ray:
    [loop]
    for (uint i = 0; i < NUM_LIGHTS; ++i)
    {
        [branch]
        if (gLights[i].Type == SPOT_LIGHT)
        {
            float3 V = float3(0.0f, 0.0f, 0.0f) - screenPos;
            float cameraDistance = length(V);
            V /= cameraDistance;
            float marchedDistance = 0;
            float accumulation = 0;
            float3 P = screenPos + V * stepSize * dither(pixel.xy);
            for (uint j = 0; j < sampleCount; ++j)
            {
                float3 L = mul(float4(gLights[i].Position, 1.0f), gView).xyz - P;
                const float dist2 = dot(L, L);
                const float dist = sqrt(dist2);
                L /= dist;
                //float3 viewDir = mul(float4(gLights[i].Direction, 1.0f), gView).xyz;
                float3 viewDir = gLights[i].Direction;
                float SpotFactor = dot(L, normalize(-viewDir));
                float spotCutOff = gLights[i].outerCosine;
                [branch]
                if (SpotFactor > spotCutOff)
                {
                    float attenuation = DoAttenuation(dist, gLights[i].Range);
                    float conAtt = saturate((SpotFactor - gLights[i].outerCosine) / (gLights[i].innerCosine - gLights[i].outerCosine));
                    conAtt *= conAtt;
                    attenuation *= conAtt;
                    attenuation *= ExponentialFog(cameraDistance - marchedDistance);
                    accumulation += attenuation;
                }
                marchedDistance += stepSize;
                P = P + V * stepSize;
            }
            accumulation /= sampleCount;
            result += max(0, float4(accumulation * gLights[i].Color * gLights[i].VolumetricStrength, 1));
        }
    }
    outputTexture[pixel] = inputTexture[pixel] + result;
}
This is my compute shader. When I don't multiply the direction by the view matrix, it goes wrong like this:
As you can see, the street lamp's volume direction is good, but the vehicle's headlight volume points in a different direction from its spot light.
And when I do multiply the direction by the view matrix:
The headlights go wrong AND the street lamp goes wrong too.
I'm still looking for what's wrong in my CPU code, but I haven't found anything.
This might be helpful: here is my shader code for spot lighting.
float CalcAttenuation(float d, float falloffStart, float falloffEnd)
{
    return saturate((falloffEnd - d) / (falloffEnd - falloffStart));
}

float3 BlinnPhongModelLighting(float3 lightDiff, float3 lightVec, float3 normal, float3 view, Material mat)
{
    const float m = mat.Exponent;
    const float f = ((mat.IOR - 1) * (mat.IOR - 1)) / ((mat.IOR + 1) * (mat.IOR + 1));
    const float3 fresnel0 = float3(f, f, f);
    float3 halfVec = normalize(view + lightVec);
    float roughness = (m + 8.0f) * pow(saturate(dot(halfVec, normal)), m) / 8.0f;
    float3 fresnel = CalcReflectPercent(fresnel0, halfVec, lightVec);
    float3 specular = fresnel * roughness;
    specular = specular / (specular + 1.0f);
    return (mat.Diffuse.rgb + specular * mat.Specular) * lightDiff;
}

float3 ComputeSpotLight(Light light, Material mat, float3 pos, float3 normal, float3 view)
{
    float3 result = float3(0.0f, 0.0f, 0.0f);
    bool bCompute = true;
    float3 lightVec = light.Position - pos;
    float d = length(lightVec);
    if (d > light.FalloffEnd)
        bCompute = false;
    if (bCompute)
    {
        lightVec /= d;
        float ndotl = max(dot(lightVec, normal), 0.0f);
        float3 lightDiffuse = light.Diffuse * ndotl;
        float att = CalcAttenuation(d, light.FalloffStart, light.FalloffEnd);
        lightDiffuse *= att;
        float spotFactor = pow(max(dot(-lightVec, light.Direction), 0.0f), light.SpotPower);
        lightDiffuse *= spotFactor;
        result = BlinnPhongModelLighting(lightDiffuse, lightVec, normal, view, mat);
    }
    return result;
}
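One hedged guess rather than a confirmed fix: the light position is brought into view space with mul(float4(Position, 1.0f), gView), so the direction must be brought into view space as well, but with a homogeneous w of 0 so that only the rotation applies. The commented-out line in the compute shader uses w = 1.0f, which wrongly adds the view translation to the direction vector. A CPU-side GLM sketch of the distinction (names hypothetical):

#include <glm/glm.hpp>

// Positions transform with w = 1: rotation plus translation.
glm::vec3 toViewSpacePoint(const glm::mat4& viewMatrix, const glm::vec3& p) {
    return glm::vec3(viewMatrix * glm::vec4(p, 1.0f));
}

// Directions transform with w = 0: rotation only, then renormalize.
glm::vec3 toViewSpaceDirection(const glm::mat4& viewMatrix, const glm::vec3& d) {
    return glm::normalize(glm::vec3(viewMatrix * glm::vec4(d, 0.0f)));
}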

Drawing multiple objects while making them share a shader

There are two groups of draw objects that I'd like to combine onto one canvas: a rain-effect layer and points forming a circle. These are their vertex shaders, taken from this website:
// Rain effect
const vs = `#version 300 es
uniform int numVerts;
uniform float time;

// hash function from https://www.shadertoy.com/view/4djSRW
// given a value between 0 and 1
// returns a value between 0 and 1 that *appears* kind of random
float hash(float p) {
    vec2 p2 = fract(vec2(p * 5.3983, p * 5.4427));
    p2 += dot(p2.yx, p2.xy + vec2(21.5351, 14.3137));
    return fract(p2.x * p2.y * 95.4337);
}

void main() {
    float u = float(gl_VertexID) / float(numVerts); // goes from 0 to 1
    float off = floor(time + u) / 1000.0;           // changes once per second per vertex
    float x = hash(u + off) * 2.0 - 1.0;            // random position
    float y = fract(time + u) * -2.0 + 1.0;         // 1.0 -> -1.0
    gl_Position = vec4(x, y, 0, 1);
    gl_PointSize = 2.0;
}
`;

// Points forming a circle
const vs = `#version 300 es
uniform int numVerts;
uniform vec2 resolution;

#define PI radians(180.0)

void main() {
    float u = float(gl_VertexID) / float(numVerts); // goes from 0 to 1
    float angle = u * PI * 2.0;                     // goes from 0 to 2PI
    float radius = 0.8;
    vec2 pos = vec2(cos(angle), sin(angle)) * radius;
    float aspect = resolution.y / resolution.x;
    vec2 scale = vec2(aspect, 1);
    gl_Position = vec4(pos * scale, 0, 1);
    gl_PointSize = 5.0;
}
`;
Both shaders assign different values to gl_Position and gl_PointSize, and I couldn't think of a way to combine them under one vertex shader without conflict. It doesn't help that the vertex data is generated entirely inside the shader instead of being passed in from a buffer.
Add a uniform variable to the shader that indicates which algorithm to use.
#version 300 es
uniform int numVerts;
uniform float time;
uniform vec2 resolution; // needed by the circle branch
uniform int mode;        // 0 or 1

#define PI radians(180.0) // needed by the circle branch

// hash function
// [...]

void main() {
    float u = float(gl_VertexID) / float(numVerts); // goes from 0 to 1
    vec2 pos;
    float pointSize;
    if (mode == 0) {
        float off = floor(time + u) / 1000.0;   // changes once per second per vertex
        float x = hash(u + off) * 2.0 - 1.0;    // random position
        float y = fract(time + u) * -2.0 + 1.0; // 1.0 -> -1.0
        pos = vec2(x, y);
        pointSize = 2.0;
    }
    else {
        float angle = u * PI * 2.0; // goes from 0 to 2PI
        float radius = 0.8;
        float aspect = resolution.y / resolution.x;
        vec2 scale = vec2(aspect, 1);
        pos = vec2(cos(angle), sin(angle)) * radius * scale;
        pointSize = 5.0;
    }
    gl_Position = vec4(pos, 0, 1);
    gl_PointSize = pointSize;
}
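Then render the same program twice per frame, flipping mode between draws. A hedged sketch using desktop GL's C API (WebGL's gl.uniform1i / gl.drawArrays are the direct analogues; program, numRainVerts, and numCircleVerts are hypothetical names):

GLint modeLoc = glGetUniformLocation(program, "mode");
glUseProgram(program);

glUniform1i(modeLoc, 0);                   // rain branch
glDrawArrays(GL_POINTS, 0, numRainVerts);

glUniform1i(modeLoc, 1);                   // circle branch
glDrawArrays(GL_POINTS, 0, numCircleVerts);
// (Also update numVerts, time, and resolution per pass as each branch needs them.)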

Fish-eye warping about mouse position - fragment shader

I'm trying to create a fish-eye effect, but only in a small radius around the mouse position. I've been able to modify this code to work around the mouse position (demo), but I can't figure out where the zooming is coming from. I'm expecting the output to warp the image similarly to this (ignore the color inversion for the sake of this question):
Relevant code:
// Check if within given radius of the mouse
vec2 diff = myUV - u_mouse - 0.5;
float distance = dot(diff, diff); // square of distance, saves a square root

// Add fish-eye
if (distance <= u_radius_squared) {
    vec2 xy = 2.0 * (myUV - u_mouse) - 1.0;
    float d = length(xy * maxFactor);
    float z = sqrt(1.0 - d * d);
    float r = atan(d, z) / PI;
    float phi = atan(xy.y, xy.x);
    myUV.x = d * r * cos(phi) + 0.5 + u_mouse.x;
    myUV.y = d * r * sin(phi) + 0.5 + u_mouse.y;
}
vec3 tex = texture2D(tMap, myUV).rgb;
gl_FragColor.rgb = tex;
This is my first shader, so other improvements besides fixing this issue are also welcome.
Compute the vector from the current fragment to the mouse and the length of the vector:
vec2 diff = myUV - u_mouse;
float distance = length(diff);
The new texture coordinate is the sum of the mouse position and the scaled direction vector:
myUV = u_mouse + normalize(diff) * u_radius * f(distance/u_radius);
For instance:
uniform float u_radius;
uniform vec2 u_mouse;

void main()
{
    vec2 diff = myUV - u_mouse;
    float distance = length(diff);
    if (distance <= u_radius)
    {
        float scale = (1.0 - cos(distance / u_radius * PI * 0.5));
        myUV = u_mouse + normalize(diff) * u_radius * scale;
    }
    vec3 tex = texture2D(tMap, myUV).rgb;
    gl_FragColor = vec4(tex, 1.0);
}
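The choice of falloff matters: any monotonic f with f(0) = 0 and f(1) = 1 keeps the edge of the warped disc continuous with the unwarped surroundings, and 1 - cos(t * pi/2) satisfies both. A hedged C++/GLM mirror of the shader's remap, handy for sanity-checking those endpoints on the CPU (function name hypothetical):

#include <glm/glm.hpp>
#include <glm/gtc/constants.hpp>

// CPU mirror of the remap: f(t) = 1 - cos(t * pi/2), so f(0) = 0 and f(1) = 1.
glm::vec2 fisheyeUV(glm::vec2 uv, glm::vec2 mouse, float radius) {
    glm::vec2 diff = uv - mouse;
    float dist = glm::length(diff);
    if (dist > radius || dist == 0.0f)
        return uv; // outside the effect, or exactly at the mouse
    float scale = 1.0f - glm::cos(dist / radius * glm::half_pi<float>());
    return mouse + glm::normalize(diff) * radius * scale;
}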

Ray tracing using the Phong illumination model: bizarre color in lighting seemingly fixed by reducing light intensity?

I'm writing a simple ray tracer with lighting using the Phong illumination model. The problem is that part of the sphere displays a completely different color. For example, the sphere should be only green in this image.
I tried reducing the light intensity, and then it somehow displays correctly, like this.
This is the code for the primary rays:

for (int i = 0; i < n; i++) {
    for (int j = 0; j < n; j++) {
        Ray ray(gCamera);
        float x = iX + j * pSize;
        float y = iY - i * pSize;
        ray.v = vec3(x * scale, y * scale, 0) - gCamera;
        gPixels[i][j] = trace(ray);
    }
}
And this is the code for the intersection (testing with sphere at origin without any transformation)
double findIntersection(const Ray& ray) {
dvec3 u = mXfmInverse * dvec4(ray.u, 1.0);
dvec3 v = mXfmInverse * dvec4(ray.v, 0.0);
double a = glm::dot(v, v);
double b = 2 * glm::dot(u, v);
double c = glm::dot(u, u) - 1;
double delta = b * b - 4 * a * c;
if (delta < 0) return -1;
double root = sqrt(delta);
double t0 = 0.5 * (-b - root) / a;
if (t0 >= 0) return t0;
double t1 = 0.5 * (-b + root) / a;
return t1 >= 0 ? t1 : -1;
}
And this is the code calculating the Phong illumination (the tail of my shading function):

    Material material = ray.sphere->getMaterial();
    // diffuse
    dvec3 center = ray.sphere->getXfm() * vec4(0, 0, 0, 1);
    dvec3 normal = glm::normalize(hitPoint - center);
    dvec3 lightDir = glm::normalize(light.position - hitPoint);
    double lambertian = max(glm::dot(normal, lightDir), 0.0);
    // specular
    double specular = 0;
    if (lambertian > 0) {
        dvec3 viewDir = glm::normalize(-ray.v);
        dvec3 reflectDir = glm::reflect(-lightDir, normal);
        specular = pow(max(dot(viewDir, reflectDir), 0.0), material.shininess);
    }
    dvec3 color = lambertian * material.diffuse + specular * material.specular;
    return color * light.color;
}
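A hedged diagnosis rather than a confirmed fix: odd hues that disappear when the light intensity is lowered are the classic signature of channel overflow. The sum lambertian * diffuse + specular * specular can exceed 1.0, and converting such values to 8-bit output wraps or distorts the hue per channel. A sketch of clamping before quantization (assuming the usual 0-255 framebuffer; names hypothetical):

#include <glm/glm.hpp>
#include <cstdint>

// Clamp each channel to [0, 1] before converting to bytes so overbright pixels
// saturate toward white instead of wrapping to an unrelated color.
uint8_t toByte(double c) {
    return static_cast<uint8_t>(glm::clamp(c, 0.0, 1.0) * 255.0 + 0.5);
}

void writePixel(uint8_t* dst, const glm::dvec3& color) {
    dst[0] = toByte(color.r);
    dst[1] = toByte(color.g);
    dst[2] = toByte(color.b);
}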

Metal - camera rotation in signed distance function

I am learning from this tutorial; I took its Metal code and changed it a bit to control the camera position with the mouse:
struct Ray {
    float3 origin;
    float3 direction;
    Ray(float3 o, float3 d) {
        origin = o;
        direction = d;
    }
};

struct Sphere {
    float3 center;
    float radius;
    Sphere(float3 c, float r) {
        center = c;
        radius = r;
    }
};

float distToSphere(Ray ray, Sphere s) {
    return length(ray.origin - s.center) - s.radius;
}

float distToScene(Ray r) {
    Sphere s = Sphere(float3(1.), 0.5);
    Ray repeatRay = r;
    repeatRay.origin = fmod(r.origin, 2.);
    return distToSphere(repeatRay, s);
}

kernel void compute(texture2d<float, access::write> output [[texture(0)]],
                    constant float &mouseX [[buffer(1)]],
                    constant float &mouseY [[buffer(2)]],
                    uint2 gid [[thread_position_in_grid]]) {
    int width = output.get_width();
    int height = output.get_height();
    float2 uv = float2(gid) / float2(width, height);
    uv = uv * 2.0 - 1.0;
    float3 camPos = float3(1000 + mouseX * -0.05, 1000 - mouseY * 0.05, 1);
    Ray ray = Ray(camPos, normalize(float3(uv.x, uv.y, 1.)));
    float3 col = float3(0.);
    for (int i = 0; i < 100; i++) { // loop counter is an int, so use integer literals
        float dist = distToScene(ray);
        if (dist < 0.001) {
            col = float3(1.);
            break;
        }
        ray.origin += ray.direction * dist;
    }
    float3 posRelativeToCamera = ray.origin - camPos;
    output.write(float4(col * abs(posRelativeToCamera / 10.0), 1.), gid);
}
Everything works fine. Now, instead of the position, I want to rotate the camera with the mouse. I did some research on shadertoy.com and converted the setCamera() code into Metal:
float3x3 setCamera(float3 origin, float3 target, float rotation) {
    float3 forward = normalize(target - origin);
    float3 orientation = float3(sin(rotation), cos(rotation), 0.0);
    float3 left = normalize(cross(forward, orientation));
    float3 up = normalize(cross(left, forward));
    return float3x3(left, up, forward);
}
However, I don't know how to integrate it into my code to make it work.
How can I make this work?
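Since the surrounding host code isn't shown, here is only a hedged sketch of the idea, expressed with GLM for clarity (the Metal version is a near line-for-line translation): build the basis once from the camera position and a target point (for example, one driven by the mouse), then rotate each pixel's camera-space ray direction by that basis; camPos stays the ray origin.

#include <cmath>
#include <glm/glm.hpp>

// GLM restatement of setCamera(): columns are the camera's left, up, forward axes.
glm::mat3 setCamera(glm::vec3 origin, glm::vec3 target, float roll) {
    glm::vec3 forward = glm::normalize(target - origin);
    glm::vec3 orientation = glm::vec3(std::sin(roll), std::cos(roll), 0.0f);
    glm::vec3 left = glm::normalize(glm::cross(forward, orientation));
    glm::vec3 up = glm::normalize(glm::cross(left, forward));
    return glm::mat3(left, up, forward);
}

// Replaces normalize(float3(uv.x, uv.y, 1.)) in the kernel: rotate the
// camera-space direction out of camera space with the basis, i.e.
// Ray ray = Ray(camPos, cam * normalize(float3(uv, focalLength)));
glm::vec3 rayDirection(const glm::mat3& cam, glm::vec2 uv, float focalLength) {
    return cam * glm::normalize(glm::vec3(uv, focalLength));
}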