Ray tracing triangular mesh objects - C++

I'm trying to write a ray tracer for objects formed of triangular meshes. I'm using an external library to load a cube from a .ply file and then ray trace it. So far I've implemented most of the tracer, and now I'm testing it with a single cube, but for some reason all I get on the screen is a red line. I've tried several fixes, but I can't figure out what's wrong. For this first test I'm only casting primary rays; if a ray hits the cube, I color that pixel with the cube's diffuse color and return. For ray-object intersections, I loop over all the triangles that form the object and return the distance to the closest hit. It would be great if you could have a look at the code and tell me what could have gone wrong and where. I would greatly appreciate it.
Ray-Triangle intersection:
bool intersectTri(const Vec3D& ray_origin, const Vec3D& ray_direction, const Vec3D& v0, const Vec3D& v1, const Vec3D& v2, double &t, double &u, double &v) const
{
    Vec3D edge1 = v1 - v0;
    Vec3D edge2 = v2 - v0;
    Vec3D pvec = ray_direction.cross(edge2);
    double det = edge1.dot(pvec);
    // Ray is (nearly) parallel to the triangle plane
    if (det > -THRESHOLD && det < THRESHOLD)
        return false;
    double invDet = 1/det;
    Vec3D tvec = ray_origin - v0;
    u = tvec.dot(pvec)*invDet;
    if (u < 0 || u > 1)
        return false;
    Vec3D qvec = tvec.cross(edge1);
    v = ray_direction.dot(qvec)*invDet;
    if (v < 0 || u + v > 1)
        return false;
    t = edge2.dot(qvec)*invDet;
    if (t < 0)
        return false;
    return true;
}
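This is the standard Möller-Trumbore test. A useful byproduct is that u and v are the barycentric weights of v1 and v2, so per-vertex attributes can be interpolated directly at the hit point. A minimal sketch, assuming only the Vec3D operators already used above:

// Hedged sketch: interpolate a per-vertex attribute (e.g. a normal) from
// the barycentric u, v returned by intersectTri. The weight of the
// attribute at v0 is (1 - u - v), at v1 it is u, and at v2 it is v.
Vec3D interpolate(const Vec3D &a0, const Vec3D &a1, const Vec3D &a2, double u, double v)
{
    return a0*(1.0 - u - v) + a1*u + a2*v;
}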
Object intersection:
bool intersect(const Vec3D& ray_origin, const Vec3D& ray_direction, IntersectionData& idata, bool enforce_max) const
{
    // When enforce_max is set, only hits closer than the caller's idata.t are accepted
    double tClosest;
    if (enforce_max)
    {
        tClosest = idata.t;
    }
    else
    {
        tClosest = TMAX;
    }
    for (int i = 0 ; i < indices.size() ; i++)
    {
        const Vec3D v0 = vertices[indices[i][0]];
        const Vec3D v1 = vertices[indices[i][1]];
        const Vec3D v2 = vertices[indices[i][2]];
        double t, u, v;
        if (intersectTri(ray_origin, ray_direction, v0, v1, v2, t, u, v))
        {
            if (t < tClosest)
            {
                idata.t = t;
                tClosest = t;
                idata.u = u;
                idata.v = v;
                idata.index = i;
            }
        }
    }
    return (tClosest < TMAX && tClosest > 0);
}
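The enforce_max flag appears intended to let a caller seed idata.t as an upper bound on accepted hits, e.g. capping a shadow ray at the light's distance. One caveat from a careful reading: when enforce_max is true and nothing is hit, tClosest keeps the seeded value, so the final tClosest < TMAX test can still report a hit; a separate found flag set inside the loop would distinguish the two cases. A hedged usage sketch (the identifiers are illustrative):

// Hedged usage sketch for enforce_max; distance_to_light, shadow_origin
// and shadow_direction are illustrative names, not part of the code above.
IntersectionData sdata;
sdata.t = distance_to_light;   // seed: only accept hits closer than the light
bool blocked = cube1.intersect(shadow_origin, shadow_direction, sdata, true);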
Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction)
{
    IntersectionData idata;
    double tClosest = TMAX;
    Object *hitObject = NULL;
    // Find the closest object hit by the primary ray
    for (unsigned int i = 0 ; i < world.objs.size() ; i++)
    {
        IntersectionData idata_curr;
        if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
        {
            if (idata_curr.t < tClosest && idata_curr.t > 0)
            {
                idata.t = idata_curr.t;
                idata.u = idata_curr.u;
                idata.v = idata_curr.v;
                idata.index = idata_curr.index;
                tClosest = idata_curr.t;
                hitObject = &(world.objs[i]);
            }
        }
    }
    if (hitObject == NULL)
    {
        return world.background_color;
    }
    return hitObject->getDiffuse();
}
int main(int argc, char** argv)
{
    parse("cube.ply");
    Vec3D diffusion1(1, 0, 0);
    Vec3D specular1(1, 1, 1);
    Object cube1(coordinates, connected_vertices, diffusion1, specular1, 0, 0);
    World wrld;
    // Add objects to the world
    wrld.objs.push_back(cube1);
    Vec3D background(0, 0, 0);
    wrld.background_color = background;
    // Set light color
    Vec3D light_clr(1, 1, 1);
    wrld.light_colors.push_back(light_clr);
    // Set light position
    Vec3D light(0, 64, -10);
    wrld.light_positions.push_back(light);
    int width = 128;
    int height = 128;
    Vec3D *image = new Vec3D[width*height];
    Vec3D *pixel = image;
    // Trace rays
    for (int y = -height/2 ; y < height/2 ; ++y)
    {
        for (int x = -width/2 ; x < width/2 ; ++x, ++pixel)
        {
            // NOTE: ray_dir is used as a point here, not a direction;
            // see the FIX note below (ray_dir - ray_orig before normalizing)
            Vec3D ray_dir(x+0.5, y+0.5, -1.0);
            ray_dir.normalize();
            Vec3D ray_orig(0.5*width, 0.5*height, 0.0);
            *pixel = trace(wrld, ray_orig, ray_dir);
        }
    }
    savePPM("./test.ppm", image, width, height);
    return 0;
}
I've just run a test case and got this:
The scene is a unit cube centered at (0, 0, -1.5) and scaled by 100 on the X and Y axes. It seems that something is wrong with the projection, but I can't tell exactly what from the result. Also, since the cube is centered at (0, 0), shouldn't the final object appear in the middle of the picture?
FIX: I fixed the centering problem by computing ray_dir = ray_dir - ray_orig before normalizing and calling the trace function. Still, the perspective seems plain wrong.
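For what it's worth, the usual pinhole-camera setup places the eye at the origin looking down the negative z-axis and maps pixel coordinates through tan(fov/2), which yields a sensible perspective. A hedged sketch of the inner loops, not the original code (the fov value and the mapping are assumptions):

// Hedged pinhole-camera sketch; fov is an assumed parameter, and the
// camera sits at the origin looking down -z (requires <cmath> for tan).
const double fov = 60.0 * 3.14159265 / 180.0;   // vertical field of view
const double scale = tan(fov * 0.5);
const double aspect = double(width) / double(height);
for (int y = 0; y < height; ++y)
{
    for (int x = 0; x < width; ++x, ++pixel)
    {
        // Map pixel centers to [-1, 1] and flip y so +y points up.
        double px = (2.0 * (x + 0.5) / width - 1.0) * aspect * scale;
        double py = (1.0 - 2.0 * (y + 0.5) / height) * scale;
        Vec3D ray_orig(0.0, 0.0, 0.0);
        Vec3D ray_dir(px, py, -1.0);
        ray_dir.normalize();
        *pixel = trace(wrld, ray_orig, ray_dir);
    }
}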

I continued working and have started implementing diffuse reflection according to the Phong model.
Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction)
{
    Vec3D objColor = Vec3D(0);
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;
    do
    {
        // Find the closest object hit by the ray
        double tClosest = TMAX;
        Object *hitObject = NULL;
        for (unsigned int i = 0 ; i < world.objs.size() ; i++)
        {
            IntersectionData idata_curr;
            if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
            {
                if (idata_curr.t < tClosest && idata_curr.t > 0)
                {
                    idata.t = idata_curr.t;
                    idata.u = idata_curr.u;
                    idata.v = idata_curr.v;
                    idata.index = idata_curr.index;
                    tClosest = idata_curr.t;
                    hitObject = &(world.objs[i]);
                }
            }
        }
        if (hitObject == NULL)
        {
            return world.background_color;
        }
        Vec3D newStart = ray_origin + ray_direction*idata.t;
        // Normal at the intersection; the interpolated (Phong) normal is
        // commented out in favor of the plain face normal for now
        Vec3D v0 = hitObject->getVertices()[hitObject->getIndices()[idata.index][0]];
        Vec3D v1 = hitObject->getVertices()[hitObject->getIndices()[idata.index][1]];
        Vec3D v2 = hitObject->getVertices()[hitObject->getIndices()[idata.index][2]];
        Vec3D n1 = hitObject->getNormals()[hitObject->getIndices()[idata.index][0]];
        Vec3D n2 = hitObject->getNormals()[hitObject->getIndices()[idata.index][1]];
        Vec3D n3 = hitObject->getNormals()[hitObject->getIndices()[idata.index][2]];
        // Vec3D N = n1 + (n2 - n1)*idata.u + (n3 - n1)*idata.v;
        Vec3D N = v0.computeFaceNrm(v1, v2);
        if (ray_direction.dot(N) > 0)
        {
            N = N*(-1);    // make the normal face the incoming ray
        }
        N.normalize();
        Vec3D lightray_origin = newStart;
        for (unsigned int itr = 0 ; itr < world.light_positions.size() ; itr++)
        {
            Vec3D lightray_dir = world.light_positions[itr] - newStart;
            lightray_dir.normalize();
            double cos_theta = max(N.dot(lightray_dir), 0.0);
            // Lambertian term: diffuse color * diffuse coefficient * cos(theta)
            objColor.setX(objColor.getX() + hitObject->getDiffuse().getX()*hitObject->getDiffuseReflection()*cos_theta);
            objColor.setY(objColor.getY() + hitObject->getDiffuse().getY()*hitObject->getDiffuseReflection()*cos_theta);
            objColor.setZ(objColor.getZ() + hitObject->getDiffuse().getZ()*hitObject->getDiffuseReflection()*cos_theta);
        }
        return objColor;
        depth++;
    } while(coeff > 0 && depth < MAX_RAY_DEPTH);
    return objColor;
}
When the primary ray hits an object, I send another ray toward the light source at (0, 0, 0) and return the color according to the Phong illumination model for diffuse reflection, but the result is not what I expected: http://s15.postimage.org/vc6uyyssr/test.png. The cube is a unit cube centered at (0, 0, 0) and then translated by (1.5, -1.5, -1.5). As I see it, the left side of the cube should receive more light, and it actually does. What do you think?
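One observation on the code above: the light ray is only used for the cosine term and is never tested against the scene, so no point can ever be in shadow. A hedged fragment for the body of the light loop (BIAS, e.g. 1e-4, and Vec3D::length() are assumptions about the surrounding code):

// Hedged fragment for the light loop above; BIAS and Vec3D::length()
// are assumptions, not part of the original code.
Vec3D toLight = world.light_positions[itr] - newStart;
double lightDist = toLight.length();
toLight.normalize();
bool inShadow = false;
for (unsigned int k = 0 ; k < world.objs.size() ; k++)
{
    IntersectionData sdata;
    // offset the origin along N so the surface does not shadow itself
    if (world.objs[k].intersect(newStart + N*BIAS, toLight, sdata, false)
        && sdata.t < lightDist)
    {
        inShadow = true;
        break;
    }
}
if (!inShadow)
{
    double cos_theta = max(N.dot(toLight), 0.0);
    // accumulate the diffuse term exactly as above
}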

Related

How to obtain smoothed normals when extruding a 2d curve (with parametric normals) into 3d?

I'm extruding a sine-wave curve into 3D, but when rendering I can see that the normals are not smoothed.
The sine-wave is generated with parametric normals, as follows:
vector<CurvePoint> sineWave(int n, float x0, float y0, float step, float period)
{
    vector<CurvePoint> curve;
    for (int i = 0; i < n; i++) {
        float a = TWO_PI / period;
        float x = x0 + i * step;
        float y = y0 - sinf(x * a);
        float c = cosf(x * a);
        auto normal = glm::vec2(a * c, 1) / sqrtf(a * a * c * c + 1);
        curve.emplace_back(glm::vec2(x, y), normal);
    }
    return curve;
}
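For what it's worth, the normal expression follows from the tangent of y(x) = y0 - sin(a*x), which is (1, -a*cos(a*x)); rotating it by 90 degrees and normalizing gives exactly the vector above. A hedged, standalone sanity check (assumes glm is available):

#include <cassert>
#include <cmath>
#include <glm/glm.hpp>

// Hedged sanity check: the parametric normal should be unit length and
// perpendicular to the curve tangent (1, -a*cos(a*x)).
void checkSineNormal(float a, float x)
{
    float c = cosf(x * a);
    glm::vec2 normal = glm::vec2(a * c, 1.0f) / sqrtf(a * a * c * c + 1.0f);
    glm::vec2 tangent = glm::normalize(glm::vec2(1.0f, -a * c));
    assert(fabsf(glm::dot(normal, tangent)) < 1e-5f);   // perpendicular
    assert(fabsf(glm::length(normal) - 1.0f) < 1e-5f);  // unit length
}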
The extruding method:
void extrude(IndexedVertexBatch<XYZ.N> &batch, const Matrix &matrix, const vector<CurvePoint> &curve, GLenum frontFace, float distance)
{
    auto size = curve.size();
    if (size > 1 && distance != 0) {
        bool cw = ((frontFace == CW) && (distance > 0)) || ((frontFace == CCW) && (distance < 0));
        for (auto i = 0; i < size - 1; i++) {
            auto &p0 = curve[i].position;
            auto &p1 = curve[i + 1].position;
            auto normal = matrix.transformNormal(glm::vec3(curve[i].normal, 0));
            batch
                .addVertex(matrix.transformPoint(p0), normal)
                .addVertex(matrix.transformPoint(p1), normal)
                .addVertex(matrix.transformPoint(glm::vec3(p1, distance)), normal)
                .addVertex(matrix.transformPoint(glm::vec3(p0, distance)), normal);
            if (cw) {
                batch.addIndices(0, 3, 2, 2, 1, 0);
            } else {
                batch.addIndices(0, 1, 2, 2, 3, 0);
            }
            batch.incrementIndices(4);
        }
    }
}
The rendering (Phong-like shading):
How can I obtain smoothed normals?
Stupid me. It was a small bug in the extruding method: every quad reused the normal of its first curve point for all four vertices, so neighboring quads could not share smoothly varying normals. Each vertex should get the normal of its own curve point:
void extrude(IndexedVertexBatch<XYZ.N> &batch, const Matrix &matrix, const vector<CurvePoint> &curve, GLenum frontFace, float distance)
{
    auto size = curve.size();
    if (size > 1 && distance != 0) {
        bool cw = ((frontFace == CW) && (distance > 0)) || ((frontFace == CCW) && (distance < 0));
        for (auto i = 0; i < size - 1; i++) {
            auto &p0 = curve[i].position;
            auto &p1 = curve[i + 1].position;
            auto normal0 = matrix.transformNormal(glm::vec3(curve[i].normal, 0));
            auto normal1 = matrix.transformNormal(glm::vec3(curve[i + 1].normal, 0));
            batch
                .addVertex(matrix.transformPoint(p0), normal0)
                .addVertex(matrix.transformPoint(p1), normal1)
                .addVertex(matrix.transformPoint(glm::vec3(p1, distance)), normal1)
                .addVertex(matrix.transformPoint(glm::vec3(p0, distance)), normal0);
            if (cw) {
                batch.addIndices(0, 3, 2, 2, 1, 0);
            } else {
                batch.addIndices(0, 1, 2, 2, 3, 0);
            }
            batch.incrementIndices(4);
        }
    }
}

Calculating the diffuse RGB value of a pixel in a ray tracer using Blinn-Phong

I am trying to calculate the RGB value of a pixel using the Blinn-Phong formula. For that I use this function:
Material getPixelColor(Ray ray, double min, int index, std::vector<Object*> Objects, std::vector<Object*> lightSources) {
    Vector intersectionPoint = ray.getOrigin() + ray.getDirection() * min;
    Vector n = Objects.at(index)->getNormalAt(intersectionPoint);
    Vector reflectiondirection = ray.getDirection() - n * Vector::dot(ray.getDirection(), n) * 2;
    Ray reflectionRay(intersectionPoint, reflectiondirection);
    // check if ray intersects any other object
    double minimum = INFINITY;
    int count = 0, indx = -1;
    for (auto const& obj : Objects) {
        double distance = obj->Intersect(reflectionRay);
        if (minimum > distance) {
            minimum = distance;
            indx = count;
        }
        count++;
    }
    Material result(0,0,0);
    if (recurseDepth >= 5 || indx == -1) {
        recurseDepth = 0;
        // Check if object is lit for each light source
        for (auto const& light : lightSources) {
            // Blinn-Phong
            Vector lightDirection = (light->getPosition() - intersectionPoint).normalize();
            double nl = Vector::dot(n, lightDirection);
            nl = nl > 0 ? nl : 0.0;
            result = result + (Objects.at(index)->getMaterial() * light->getMaterial() * nl);
        }
    }
    else {
        recurseDepth++;
        result = result + getPixelColor(reflectionRay, minimum, indx, Objects, lightSources);
    }
    return result;
}
The result that I get is this:
This is how it was without shading:
I have been trying to find a solution for hours and can't find one. Am I using the wrong formula?
After a lot of research, I removed the part that picks up color from other objects:
Material getPixelColor(Ray ray, double min, int index, std::vector<Object*> Objects, std::vector<Object*> lightSources) {
    Vector intersectionPoint = ray.getOrigin() + ray.getDirection() * min;
    Vector n = Objects.at(index)->getNormalAt(intersectionPoint);
    Material result(0,0,0);
    // Check if object is lit for each light source
    for (auto const& light : lightSources) {
        // create a ray to the light and check if there is an object between the two
        Vector lightDirection = (light->getPosition() - intersectionPoint).normalize();
        Ray lightRay(intersectionPoint, lightDirection);
        bool hit = false;
        for (auto const& obj : Objects) {
            double distance = obj->Intersect(lightRay);
            if (INFINITY > distance && distance > 0.0001) {
                hit = true;
                break;
            }
        }
        if (!hit) {
            // Blinn-Phong
            double nl = Vector::dot(n, lightDirection);
            // clamp nl between 0 and 1
            if (nl > 1.0) {
                nl = 1.0;
            }
            else if (nl < 0.0) {
                nl = 0.0;
            }
            result = result + (Objects.at(index)->getMaterial() * nl);
        }
    }
    return result;
}
And so I got the desired result:
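As an aside, the loop above only implements the Lambertian diffuse term; what makes the model Blinn-Phong is a specular term built from the half-vector between the light and view directions. A hedged fragment that could go inside the if (!hit) branch (the shininess exponent is an assumption, and pow needs <cmath>):

// Hedged Blinn-Phong specular fragment; "shininess" is an assumed
// material exponent, not part of the original code.
Vector viewDirection = (ray.getOrigin() - intersectionPoint).normalize();
Vector halfVector = (lightDirection + viewDirection).normalize();
double nh = Vector::dot(n, halfVector);
nh = nh > 0.0 ? nh : 0.0;
result = result + (light->getMaterial() * pow(nh, shininess));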

Ray Tracer, shadow ray produces black circle?

As you can see in the image, I'm getting a black circle on top of the spheres, and the image appears grainy. It's supposed to be sharper, but there are these small black and white spots.
This is the code for the shadow ray:
int pos = 0;
float intersect(const ray &r, vector<unique_ptr<object>> &obj)
{
    // gives closest object hit point and position
    float closest = numeric_limits<float>::max();
    for(int j = 0; j < obj.size(); j++)
    {
        float t = obj[j]->intersect(r);
        if(t > 1e-6 && t < closest)
        {
            closest = t;
            pos = j;
        }
    }
    return closest;
}
vec color(const ray& r, vector<unique_ptr<object>> &shape, vector<unique_ptr<Light>> &lighting, int depth)
{
    vec background_color(.678, .847, .902);
    vec total{0.0, 0.0, 0.0};
    vec ambient{0.125, 0.125, 0.125};
    float t_near = intersect(r, shape);
    if(t_near == numeric_limits<float>::max())
        return background_color;
    else
    {
        total += ambient;
        for(int i = 0; i < lighting.size(); i++){
            total += shape[pos]->shade(lighting[i]->position(), t_near, r); // gives specular + diffuse
            vec shadow_dir = unit_vector(lighting[i]->position() - r.p_at_par(t_near));
            ray shadowRay(r.p_at_par(t_near), shadow_dir);
            float dist = shadow_dir.lenght();
            float a = intersect(shadowRay, shape);
            if(a != numeric_limits<float>::max())
                return vec(0.0, 0.0, 0.0);
        }
        return total;
    }
}
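One more side note before the fix: the global pos is overwritten by every intersect call, including the shadow-ray test inside the light loop, so shape[pos] can point at the wrong object on the next iteration. A hedged refactor that returns the index through an out-parameter instead:

// Hedged sketch: the same closest-hit search without the global "pos";
// hitIndex is an illustrative out-parameter.
float intersect(const ray &r, vector<unique_ptr<object>> &obj, int &hitIndex)
{
    float closest = numeric_limits<float>::max();
    hitIndex = -1;
    for(size_t j = 0; j < obj.size(); j++)
    {
        float t = obj[j]->intersect(r);
        if(t > 1e-6 && t < closest)
        {
            closest = t;
            hitIndex = (int)j;
        }
    }
    return closest;
}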
Okay, got it.
For the black circle, you have to test that the shadow ray's hit distance is less than the distance between the point and the light source. Also, for computing that distance, shadow_dir shouldn't be normalised yet. And to deal with the black and white spots, which are due to shadow self-intersection, you have to offset the hit point by N*bias, where the bias is, for example, 1e-4. The bias shouldn't be too small:
vec shadow_dir = lighting[i]->position() - r.p_at_par(t_near);
float dist = shadow_dir.lenght();
vec N = unit_vector(shape[pos]->normal(r, t_near));
shadow_dir = unit_vector(shadow_dir);
ray shadowRay(r.p_at_par(t_near) + N*1e-4, shadow_dir);
float a = intersect(shadowRay, shape);
if(a != numeric_limits<float>::max()){
    float m = shadowRay.p_at_par(a).lenght();
    if(a < dist)
        return vec(0.0, 0.0, 0.0);
}

Bounding Volume Hierarchy ray traversal issues

I've successfully implemented BVH as described in PBRT. It has one big issue, though: the traversal visits ALL nodes that intersect the ray, which is wrong in terms of performance.
So I ended up optimizing the ray traversal; currently I use the version from Aila & Laine's implementation from their "Understanding the Efficiency of Ray Traversal on GPUs". First, here is the code:
INLINE bool BVH::Traverse(TriangleWoop* prims, Ray* ray, IntersectResult* result)
{
    unsigned int todo[32];
    unsigned int todoOffset = 0;
    unsigned int nodeNum = 0;
    bool hit = false;
    IntersectResult tmp = IntersectResult();
    *(int*)&tmp.data.w = -1;
    float tmin = 2e30f;
    float4 origin = ray->origin;
    float4 direction = ray->direction;
    float4 invdir = rcp(direction);
    float tmpx = 0.0f, tmpy = 0.0f;
    while(true)
    {
        while(this->nodes[nodeNum].prim_count == 0)
        {
            tmpx += 0.01f;
            tmpy += 0.001f;
            float4 c0v1 = (this->nodes[nodeNum + 1].bounds.minPt - origin) * invdir;
            float4 c0v2 = (this->nodes[nodeNum + 1].bounds.maxPt - origin) * invdir;
            float4 c1v1 = (this->nodes[this->nodes[nodeNum].above_child].bounds.minPt - origin) * invdir;
            float4 c1v2 = (this->nodes[this->nodes[nodeNum].above_child].bounds.maxPt - origin) * invdir;
            float4 c0n = f4min(c0v1, c0v2);
            float4 c0f = f4max(c0v1, c0v2);
            float4 c1n = f4min(c1v1, c1v2);
            float4 c1f = f4max(c1v1, c1v2);
            float n0 = max(c0n.x, max(c0n.y, c0n.z));
            float f0 = min(c0f.x, min(c0f.y, c0f.z));
            float n1 = max(c1n.x, max(c1n.y, c1n.z));
            float f1 = min(c1f.x, min(c1f.y, c1f.z));
            bool child0 = (f0 > 0.0f) && (n0 < f0);
            bool child1 = (f1 > 0.0f) && (n1 < f1);
            child0 &= (n0 < tmin);
            child1 &= (n1 < tmin);
            unsigned int nodeAddr = this->nodes[nodeNum].above_child;
            nodeNum = nodeNum + 1;
            if(child0 != child1)
            {
                if(child1)
                {
                    nodeNum = nodeAddr;
                }
            }
            else
            {
                if(!child0)
                {
                    if(todoOffset == 0)
                    {
                        goto result;
                    }
                    nodeNum = todo[--todoOffset];
                }
                else
                {
                    if(n1 < n0)
                    {
                        swap(nodeNum, nodeAddr);
                    }
                    todo[todoOffset++] = nodeAddr;
                }
            }
        }
        if(this->nodes[nodeNum].prim_count > 0)
        {
            for(unsigned int i = this->nodes[nodeNum].prim_offset; i < this->nodes[nodeNum].prim_offset + this->nodes[nodeNum].prim_count; i++)
            {
                const TriangleWoop* tri = &prims[this->indexes[i]];
                if(IntersectRayTriangleWoop(ray, tri, &tmp))
                {
                    if(tmp.data.z > 0.0f && tmp.data.z < result->data.z)
                    {
                        tmin = tmp.data.z;
                        result->data.z = tmp.data.z;
                        result->data.x = tmp.data.x;
                        result->data.y = tmp.data.y;
                        *(int*)&result->data.w = this->indexes[i];
                        hit = true;
                    }
                }
            }
        }
        if(todoOffset == 0)
        {
            goto result;
        }
        nodeNum = todo[--todoOffset];
    }
result:
    result->data.x = tmpx;
    result->data.y = tmpy;
    return hit;
}
Technically it's just a standard while-while stack-based ray-BVH traversal. Now to the main problem: look at the next image (viewing Sponza from outside); the colors show how many BVH nodes have been visited (full red = 100, full yellow = 1100):
The next image shows a similar situation inside:
As you can see, this is a real problem: the traversal visits many more nodes than it should. Can someone spot something wrong with my code? Any advice is welcome, as I've been stuck on this for a few days and can't come up with a solution.

Raytracing - Ray/Triangle Intersection

I am having an issue with my algorithm for checking whether a ray intersects a 3D triangle. It still draws the circle behind it (top left-hand corner). I can't find what in my code is causing this error.
bool Mesh::intersectTriangle(Ray const &ray,
                             Triangle const &tri,
                             Intersection &hit) const
{
    // Extract vertex positions from the mesh data.
    Vector const &p0 = positions[tri[0].pi];
    Vector const &p1 = positions[tri[1].pi];
    Vector const &p2 = positions[tri[2].pi];
    Vector e1 = p1 - p0;
    Vector e2 = p2 - p0;
    Vector e1e2 = e1.cross(e2);
    Vector p = ray.direction.cross(e2);
    e1e2.normalized();
    float a = e1.dot(p);
    if(a < 0.000001)
        return false;
    float f = 1 / a;
    Vector s = ray.origin - p0;
    float u = f*(s.dot(p));
    if(u < 0.0 || u > 1.0)
        return false;
    Vector q = s.cross(e1);
    float v = f * (ray.direction.dot(q));
    if(v < 0.0 || u + v > 1.0)
        return false;
    float t = f * (e2.dot(q));
    hit.depth = t;
    hit.normal = e1e2;
    hit.position = hit.position * t;
    return true;
}
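A few things stand out compared with the intersectTri routine at the top of this page: t is never tested against zero or against the closest hit so far, hit.position is built from the old hit.position rather than from the ray, and e1e2.normalized() discards its result if it returns a copy instead of mutating. A hedged sketch of the ending with those guards (EPSILON and the convention that the caller initializes hit.depth to a large value are assumptions):

// Hedged sketch of the tail of intersectTriangle with the missing guards;
// EPSILON and the hit.depth initialization convention are assumptions.
float t = f * (e2.dot(q));
if(t < EPSILON)                  // reject hits behind or too close to the origin
    return false;
if(t >= hit.depth)               // keep only the nearest hit found so far
    return false;
hit.depth = t;
hit.normal = e1e2.normalized();  // assign the normalized normal (if it returns a copy)
hit.position = ray.origin + ray.direction * t;
return true;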