Transparent/noisy spheres when using simple diffuse calculations - c++

I've been trying to write a raytracer, but I came across a problem when implementing simple diffuse calculations (I'm trying to replicate the first ones from Ray Tracing in One Weekend, but without a guide).
Here's the relevant code:
Intersection/diffuse calculations:
#pragma once
#include "Camera.h"
#include <cmath>
#include "Defs.h"

template<typename O>
class Sphere{
    O Radius;
    Point3<O> Center;
    Color<O> Col;
public:
    Sphere(O radius, Point3<O> center, Color<O> color);
    O quadratic(Ray<O> ray_in, O &disc, O t_Min, O t_Max);
    bool intersect(Ray<O> ray_in, rayInfo<O> &info, O t_Max);
};
template<typename O>
Sphere<O>::Sphere(O radius, Point3<O> center, Color<O> color) : Radius(radius), Center(center), Col(color) {}

template<typename O>
O Sphere<O>::quadratic(Ray<O> ray_in, O &disc, O t_Min, O t_Max){
    Point3<O> origin = ray_in.Origin;
    Vec3<O> direction = ray_in.Direction;
    Vec3<O> o = origin - Center;
    O a = direction.dot(direction);
    O b = 2 * direction.dot(o);
    O c = o.dot(o) - (Radius * Radius);
    O discriminant = b * b - 4 * (a * c);
    if (discriminant < 0){
        return false;
    }
    disc = ((-b - sqrt(discriminant)) / (2 * a));
    if (disc > t_Max || t_Min > disc){
        disc = ((-b + sqrt(discriminant)) / (2 * a));
        if (disc > t_Max || t_Min > disc){
            return false;
        }
    }
    return true;
}
template<typename O>
bool Sphere<O>::intersect(Ray<O> ray_in, rayInfo<O> &info, O t_Max){
    O disc;
    if (quadratic(ray_in, disc, info.Min, t_Max)){
        Point3<O> p = ray_in.at(disc);
        Vec3<O> normal = (p - Center) / Radius;
        info.Point = p;
        info.Normal = normal;
        info.front_face();
        info.Min = disc;
        return true;
    }
    else{
        return false;
    }
}
Tracer class:
#pragma once
#include <iostream>
#include "Shapes.h"
#include "Defs.h"
#include "Image.h"

template<typename O>
class Tracer{
    std::vector<Sphere<O>> Shapes;
public:
    Tracer(std::vector<Sphere<O>> shapes);
    void iterator(Ray<O> &ray, O &depth, O t_Max, O t_Min);
};
template<typename O>
Tracer<O>::Tracer(std::vector<Sphere<O>> shapes) : Shapes(shapes) {}

template<typename O>
void Tracer<O>::iterator(Ray<O> &ray, O &depth, O t_Max, O t_Min){
    O conc = 1;
    Color<O> col(0.4f, 0.8f, 0.9f);
    bool hit = false;
    rayInfo<O> info;
    info.Min = t_Min;
    if (depth <= 0)
        conc = 0;
    while (depth > 0){
        for (auto i = Shapes.begin(); i != Shapes.end(); i++){
            if (i->intersect(ray, info, t_Max)){
                conc *= 0.28;
                hit = true;
            }
        }
        if (!hit){
            break;
        }
        Vec3<O> circ = Vec3<O>::random_in_unit_sphere();
        Point3<O> target = info.Point + info.Normal + circ;
        ray = Ray<O>(info.Point, target - info.Point);
        info.Min = t_Min;
        hit = false;
        depth--;
    }
    col = col * conc;
    Image<O>::ColorPixel(std::cout, col);
}
And main just in case:
#include <iostream>
#include <cmath>
#include "../Matrix.h"
#include "Camera.h"
#include <vector>
#include "Image.h"
#include "Shapes.h"
#include "Tracer.h"
#include "../Defs.h"

template<typename O>
using Point3 = Vec3<O>;
template<typename O>
using Color = Vec3<O>;

int main(){
    const int img_width = 640;
    const int img_height = 480;
    const float img_ratio = img_width/img_height;
    float t_Max = infinity; float t_Min = 0.01; float depth = 50.0f;
    float inv_width = 1 / float(img_width);
    float inv_height = 1 / float(img_height);
    std::vector<Sphere<float>> shapes;
    Camera<float> cam1(40.0f, img_ratio, Point3<float>(0.0f, 0.0f, 0.0f), Point3<float>(0.0f, 0.0f, -1.0f), Vec3<float>(0.0f, 1.0f, 0.0f));
    Sphere<float> cir1(0.4f, Point3<float>(0.0f, 0.0f, -1.0f), Color<float>(0.7f, 0.3f, 0.2f));
    Sphere<float> cir2(3.0f, Point3<float>(0.0f, -3.0f, -1.0f), Color<float>(0.2f, 0.7f, 0.8f));
    Sphere<float> cir3(0.5f, Point3<float>(1.0f, 0.0f, -1.0f), Color<float>(0.2f, 0.3f, 0.7f));
    shapes.push_back(cir1);
    shapes.push_back(cir2);
    shapes.push_back(cir3);
    Tracer<float> tracer(shapes);
    std::cout << "P3\n" << img_width << ' ' << img_height << "\n255" << std::endl;
    Ray<float> ray(Point3<float>(0.0f), Vec3<float>(0.0f));
    for (int j = 0; j < img_height; j++)
    {
        std::cerr << "\rScanlines remaining: " << j << ' ' << std::flush;
        for (int i = 0; i < img_width; i++){
            depth = 50.0f;
            float x = i;
            float y = j;
            cam1.screenCords(x, y, img_width, img_height);
            ray = cam1.get_raydir(x, y);
            //ray = Ray<float>(Vec3<float>(x1, y1, 1), Point3<float>(0.0f, 0.0f, 0.0f));
            tracer.iterator(ray, depth, t_Max, t_Min);
        }
    }
    std::cerr << "\n done " << std::endl;
}
Here's what it looks like right now:
Edit: When I attenuate outside the shapes loop (by moving `conc *= 0.28` outside of it), my image ends up looking something like this:
I can see something that looks like shadows but it's obviously not the intended behavior.
Edit 2:
As Yakov pointed out, setting info.Min to the hit distance on every intersection is inverted logic. I should instead be shrinking the far bound (t_Max) so that the ray never accepts objects farther away than the current closest hit.
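In code, the fix looks roughly like this (a sketch; closest is a new local that replaces t_Max in the call, and intersect() still stores the hit distance in info.Min):

    O closest = t_Max;  // shrink the far bound instead of raising the near one
    for (auto i = Shapes.begin(); i != Shapes.end(); i++){
        if (i->intersect(ray, info, closest)){
            closest = info.Min;  // the hit distance intersect() just stored
            info.Min = t_Min;    // restore the near bound for the next sphere
            hit = true;
        }
    }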
I added anti-aliasing and gamma correction with a gamma of 3 (cube root), and the image looks much better now. A little strange still, but it's progress:
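The gamma step is just a per-channel cube root applied right before the pixel is written; a sketch, where the .x/.y/.z accessors are placeholders for whatever Vec3 actually exposes, and samples_per_pixel is the anti-aliasing sample count:

    col = col * (1.0f / samples_per_pixel);  // average the accumulated samples
    col = Color<float>(std::cbrt(col.x), std::cbrt(col.y), std::cbrt(col.z));  // gamma = 3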
Edit 3:
It finally works! Turns out I had an error in my random_in_unit_sphere() function. It should look something like this:
static Vec3<T> random_in_unit_sphere(){
    bool flag = true;
    Vec3<T> p;
    while (flag){
        p = randomm(-1, 1);
        auto l = p.length();
        if (l * l < 1) { flag = false; }
    }
    return p;
}
Thanks to Yakov and Spektre! Much appreciated.

The noise is there because you randomize your diffuse rays:
Vec3<O> circ = Vec3<O>::random_in_unit_sphere();
Each time your ray hits something, you attenuate the color:
conc *= 0.28;
Obviously, some rays are going to bounce more than others and, accordingly, get darker.
This noise is an expected artifact of any Monte Carlo integrator. To reduce the noise you need to increase the number of samples per pixel, and apply a de-noiser at the very end.
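A sketch of per-pixel supersampling; random_float() and trace_sample() are hypothetical stand-ins for an RNG returning values in [0, 1) and for a trace routine that returns (rather than prints) the color of one ray:

    const int spp = 64;  // samples per pixel
    Color<float> sum(0.0f, 0.0f, 0.0f);
    for (int s = 0; s < spp; s++) {
        float u = (i + random_float()) * inv_width;   // jitter within the pixel
        float v = (j + random_float()) * inv_height;
        sum = sum + trace_sample(u, v);               // color of one random ray
    }
    Color<float> pixel = sum * (1.0f / spp);          // average the samples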
The "transparency" is there because you apply that attenuation within the intersection loop:
for (auto i = Shapes.begin(); i != Shapes.end(); i++){
    if (i->intersect(ray, info, t_Max)){
        conc *= 0.28; // <-- here
        hit = true;
    }
}
A ray that intersects multiple spheres will be attenuated multiple times, even by spheres that should be obscured. Instead you have to attenuate outside the loop, when you've figured out that your ray bounces:
if (!hit){
    break;
}
conc *= 0.28; // <-- apply attenuation once per bounce
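With that in place, one iteration of the bounce loop looks roughly like this (a sketch built from the question's own names; the closest-hit search over Shapes is elided):

    while (depth > 0){
        // ... closest-hit search over Shapes sets `hit` and fills `info` ...
        if (!hit){
            break;
        }
        conc *= 0.28;  // exactly one attenuation per bounce
        Vec3<O> circ = Vec3<O>::random_in_unit_sphere();
        ray = Ray<O>(info.Point, info.Normal + circ);
        hit = false;
        depth--;
    }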

I'm too lazy to debug your code, however the screenshot and a quick look at the source hint at accuracy problems. So try to use 64-bit doubles instead of 32-bit floats...
Intersections between a ray and an ellipsoid/sphere tend to be noisy with just floats... once refraction and reflection are added on top of that, the noise multiplies...
Also it sometimes helps to use relative coordinates instead of absolute ones (that can make a huge impact even on floats). For more info see:
ray and ellipsoid intersection accuracy improvement
raytrace through 3D mesh
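Since your code is templated on the scalar type O anyway, trying doubles is mostly a matter of instantiating with double instead of float, e.g. (a sketch of the relevant lines from your main):

    std::vector<Sphere<double>> shapes;
    Sphere<double> cir1(0.4, Point3<double>(0.0, 0.0, -1.0), Color<double>(0.7, 0.3, 0.2));
    shapes.push_back(cir1);
    Tracer<double> tracer(shapes);
    double t_Max = infinity, t_Min = 0.01, depth = 50.0;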

Related

(Ray tracing) Trouble converting to screen coordinates, objects being stretched

I followed along with Ray Tracing in One Weekend and managed to get the final output, but I wanted to understand more about creating a camera and "painting" the screen, since the book doesn't go over it much.
When I tried using a different way of creating a camera, the spheres actually get elongated, making them look more like ellipses. I've tried modifying the x and y assignments in screenCords, but I've only managed to make more errors (such as objects wrapping around to the opposite side).
Camera.h:
#pragma once
#include "../Matrix.h"
#include "../Defs.h"
#include "Defs.h"

template<typename O>
using Point3 = Vec3<O>;
template<typename O>
using Color = Vec3<O>;

template <typename O>
class Camera{
    O Height;
    O Width;
    Vec3<O> Forward, Right, Up;
    Point3<O> Origin;
public:
    Camera(O fov, O aspect_ratio, Point3<O> origin, Point3<O> target, Vec3<O> upguide) {
        Height = atan(degrees_to_radians(fov));
        Width = Height * aspect_ratio;
        Origin = origin;
        Forward = target - origin;
        Forward.normalize();
        Right = Forward.cross(upguide);
        Right.normalize();
        Up = Right.cross(Forward);
    }
    Ray<O> get_raydir(O right, O up){
        Vec3<O> result(Forward + right * Width * Right + up * Height * Up);
        result.normalize();
        return Ray<O>(Origin, result);
    }
    void screenCords(O &x, O &y, O width, O height){
        x = ((2.0f * x) / width) - 1.0f;
        y = ((2.0f * y) / height);
    }
};
Main.cpp
#include <iostream>
#include <cmath>
#include "../Matrix.h"
#include "Camera.h"
#include <vector>
#include "Image.h"
#include "Shapes.h"
#include "Tracer.h"
#include "../Defs.h"

template<typename O>
using Point3 = Vec3<O>;
template<typename O>
using Color = Vec3<O>;

int main(){
    const int img_ratio = 2;
    const int img_width = 640;
    const int img_height = 480;
    const int depth = 50; float t_Max = infinity; float t_Min = 0.001;
    float inv_width = 1 / float(img_width);
    float inv_height = 1 / float(img_height);
    std::vector<Sphere<float>> shapes;
    Camera<float> cam1(20.0f, img_ratio, Point3<float>(0.0f, 0.0f, 0.0f), Point3<float>(0.0f, 0.0f, -1.0f), Vec3<float>(0.0f, 1.0f, 0.0f));
    Sphere<float> cir1(0.2f, Point3<float>(0.2f, 0.0f, -1.0f));
    Sphere<float> cir2(7.0f, Point3<float>(0.0f, -7.0f, -1.0f));
    Sphere<float> cir3(0.5f, Point3<float>(1.0f, 0.0f, -1.0f));
    shapes.push_back(cir1);
    //shapes.push_back(cir2);
    //shapes.push_back(cir3);
    Tracer<float> tracer(shapes);
    std::cout << "P3\n" << img_width << ' ' << img_height << "\n255" << std::endl;
    Ray<float> ray(Point3<float>(0.0f), Vec3<float>(0.0f));
    for (int j = 0; j < img_height; j++)
    {
        std::cerr << "\rScanlines remaining: " << j << ' ' << std::flush;
        for (int i = 0; i < img_width; i++){
            float x = i;
            float y = j;
            cam1.screenCords(x, y, img_width, img_height);
            ray = cam1.get_raydir(x, y);
            //ray = Ray<float>(Vec3<float>(x1, y1, 1), Point3<float>(0.0f, 0.0f, 0.0f));
            tracer.iterator(ray, depth, t_Max, t_Min);
        }
    }
    std::cerr << "\n done " << std::endl;
}
I suspect the error is in one of these files, since the spheres are actually being drawn with colors based on normals (with the top and bottom normal colors unsurprisingly being bugged).
Here are a few examples of the output:
You should define
const float img_ratio = (float)img_width / img_height;
which, for a 640x480 image, would be 1.333 rather than 2 as in your code.
Also in screenCords you subtract 1.0f from x but not from y. It creates a tilt-shift effect.
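A symmetric mapping would look like this (a sketch of the corrected screenCords):

    void screenCords(O &x, O &y, O width, O height){
        x = ((2.0f * x) / width) - 1.0f;   // [-1, 1]
        y = ((2.0f * y) / height) - 1.0f;  // [-1, 1], now matching x
    }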

breakout collision detection using vectors

I am working on a breakout game using C++ and OpenGL. I have stubbed out some code in straight C++, using a vector to store objects.
I want to streamline my collision detection code. I'm storing the brick values in a vector (used as a dynamic array), and I want to iterate through that vector to determine whether a brick is hit by the ball. I can already detect when a single brick is hit, but instead of writing a separate collision detection loop for every single brick, I want to iterate through the stored brick values dynamically.
My code:
class Brick
{
public:
    float x;
    float y;
    float brick_x;
    float brick_y;
    float brickWidth;
    float brickHeight;
};

int main()
{
    vector<Brick> brick;
    Brick playerBrick;
    Brick playerBrick_two;
    playerBrick.x = 30.0f;
    playerBrick.y = 20.0f;
    playerBrick.brick_x = 20.0f;
    playerBrick.brick_y = 10.0f;
    playerBrick.brickWidth = 60.0f;
    playerBrick.brickHeight = 20.0f;
    playerBrick_two.x = 40.0f;
    playerBrick_two.y = 30.0f;
    playerBrick_two.brick_x = 30.0f;
    playerBrick_two.brick_y = 20.0f;
    playerBrick_two.brickWidth = 60.0f;
    playerBrick_two.brickHeight = 20.0f;
    brick.push_back(playerBrick);
    brick.push_back(playerBrick_two);
    for (int i = 0; i < 2; i++)
    {
        cout << brick[i].x << " " << brick[i].y << " " << brick[i].brick_x << " " << brick[i].brick_y << " " << brick[i].brickWidth << " " << brick[i].brickHeight << endl;
    }
    for (int i = 0; i < 2; i++)
    {
        if (brick[i].x > brick[i].brick_x && brick[i].x < brick[i].brick_x + brick[i].brickWidth && brick[i].y > brick[i].brick_y && brick[i].y < brick[i].brick_y + brick[i].brickHeight)
        {
            cout << "Collision" << endl;
        }
    }
}

void bricks_eight()
{
    glColor3f(0.8f, 0.0f, 0.0f);
    glRectf(50.0f, 60.0f, 70.0f, 50.0f);
    glRectf(70.0f, 60.0f, 90.0f, 50.0f);
    glRectf(90.0f, 60.0f, 110.0f, 50.0f);
    glRectf(110.0f, 60.0f, 130.0f, 50.0f);
    glRectf(130.0f, 60.0f, 150.0f, 50.0f);
    glRectf(150.0f, 60.0f, 170.0f, 50.0f);
}
I am posting a row of bricks that I want to eliminate when the ball strikes them. Since I have several rows of bricks, I want a collision detection function that checks for a ball-brick collision using a vector. The x and y variables are the ball coordinates, and the brick_x and brick_y variables are the brick coordinates.
I adjusted Makogan's code, but it still does not check many collisions at once.
class Brick
{
public:
    float x;
    float y;
    float brick_x = 0.0f;
    float brick_y = 0.0f;
    float brickWidth = 20.0f;
    float brickHeight = 10.0f;
    bool TestCollision(float x, float y)
    {
        if (x > brick_x && x < brick_x + brickWidth && y > brick_y && y < brick_y + brickHeight)
        {
            return true;
        }
        return false;
    }
};

class BrickLayer
{
public:
    vector<Brick> bricks{(5.0f, 5.0f)};
    bool TestCollisions(float x, float y) {
        for (auto& brick : bricks) if (brick.TestCollision(x, y)) return true;
        return false;
    }
};

int main()
{
    BrickLayer brick;
    cout << brick.TestCollisions(5.0f, 5.0f) << endl;
    system("pause");
    return 0;
}
Based on the comments, it sounds like you just need a wrapper to abstract detecting collisions for a group / row of your bricks.
So my suggestion is
class Brick
{
public:
    float x;
    float y;
    float brick_x;
    float brick_y;
    float brickWidth;
    float brickHeight;
    bool TestCollision(float x, float y) { /* check goes here */ }
};

class BrickLayer
{
public:
    vector<Brick> bricks;
    float min_x;
    float min_y;
    float total_width;
    float total_height;
    BrickLayer(vector<Brick> bricks) { /* initialize the instance fields here */ }
    // Option 1, iterating over every brick:
    bool TestCollisions(float x, float y) {
        for (auto& brick : bricks) if (brick.TestCollision(x, y)) return true;
        return false;
    }
    // Option 2, use cached information:
    bool TestCollisions(float x, float y) {
        return x > min_x && x < min_x + total_width && y > min_y && y < min_y + total_height;
    }
};
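And since the stated goal is to eliminate bricks when the ball strikes them, a follow-up sketch (reusing the Brick class above): remove every struck brick with the erase-remove idiom rather than just reporting a hit.

    #include <algorithm>

    // Sketch: erase every brick that the ball at (x, y) currently hits.
    void RemoveHitBricks(vector<Brick>& bricks, float x, float y)
    {
        bricks.erase(
            remove_if(bricks.begin(), bricks.end(),
                      [&](Brick& b) { return b.TestCollision(x, y); }),
            bricks.end());
    }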

OpenGL Sphere vertices and UV coordinates

I know there are many similar questions for this issue, such as this one, but I can't seem to figure out what is going wrong in my program.
I am attempting to create a unit sphere using the naive longitude/latitude method, then I attempt to wrap a texture around the sphere using UV coordinates.
I am seeing the classic vertical seam issue, but I'm also seeing some strangeness at both poles.
North Pole...
South Pole...
Seam...
The images are from a sphere with 180 stacks and 360 slices.
I create it as follows.
First, here are a couple of convenience structures I'm using...
struct Point {
    float x;
    float y;
    float z;
    float u;
    float v;
};

struct Quad {
    Point lower_left;   // Lower left corner of quad
    Point lower_right;  // Lower right corner of quad
    Point upper_left;   // Upper left corner of quad
    Point upper_right;  // Upper right corner of quad
};
I first specify a sphere which is '_stacks' high and '_slices' wide.
float* Sphere::generate_glTriangle_array(int& num_elements) const
{
    int elements_per_point = 5;  // xyzuv
    int points_per_triangle = 3;
    int triangles_per_mesh = _stacks * _slices * 2;  // 2 triangles make a quad
    num_elements = triangles_per_mesh * points_per_triangle * elements_per_point;

    float *buff = new float[num_elements];
    int i = 0;
    Quad q;
    for (int stack = 0; stack < _stacks; ++stack)
    {
        for (int slice = 0; slice < _slices; ++slice)
        {
            q = generate_sphere_quad(stack, slice);
            load_quad_into_array(q, buff, i);
        }
    }
    return buff;
}
Quad Sphere::generate_sphere_quad(int stack, int slice) const
{
    Quad q;
    std::cout << "Stack " << stack << ", Slice: " << slice << std::endl;
    std::cout << " Lower left...";
    q.lower_left = generate_sphere_coord(stack, slice);
    std::cout << " Lower right...";
    q.lower_right = generate_sphere_coord(stack, slice+1);
    std::cout << " Upper left...";
    q.upper_left = generate_sphere_coord(stack+1, slice);
    std::cout << " Upper right...";
    q.upper_right = generate_sphere_coord(stack+1, slice+1);
    std::cout << std::endl;
    return q;
}
Point Sphere::generate_sphere_coord(int stack, int slice) const
{
    Point p;
    p.y = 2.0 * stack / _stacks - 1.0;
    float r = sqrt(1 - p.y * p.y);
    float angle = 2.0 * M_PI * slice / _slices;
    p.x = r * sin(angle);
    p.z = r * cos(angle);
    p.u = (0.5 + ( (atan2(p.z, p.x)) / (2 * M_PI) ));
    p.v = (0.5 + ( (asin(p.y)) / M_PI ));
    std::cout << " Point: (x: " << p.x << ", y: " << p.y << ", z: " << p.z << ") [u: " << p.u << ", v: " << p.v << "]" << std::endl;
    return p;
}
I then load my array, specifying vertices of two CCW triangles for each Quad...
void Sphere::load_quad_into_array(const Quad& q, float* buff, int& buff_idx, bool counter_clockwise=true)
{
    if (counter_clockwise)
    {
        // First triangle
        load_point_into_array(q.lower_left, buff, buff_idx);
        load_point_into_array(q.upper_right, buff, buff_idx);
        load_point_into_array(q.upper_left, buff, buff_idx);
        // Second triangle
        load_point_into_array(q.lower_left, buff, buff_idx);
        load_point_into_array(q.lower_right, buff, buff_idx);
        load_point_into_array(q.upper_right, buff, buff_idx);
    }
    else
    {
        // First triangle
        load_point_into_array(q.lower_left, buff, buff_idx);
        load_point_into_array(q.upper_left, buff, buff_idx);
        load_point_into_array(q.upper_right, buff, buff_idx);
        // Second triangle
        load_point_into_array(q.lower_left, buff, buff_idx);
        load_point_into_array(q.upper_right, buff, buff_idx);
        load_point_into_array(q.lower_right, buff, buff_idx);
    }
}

void Sphere::load_point_into_array(const Point& p, float* buff, int& buff_idx)
{
    buff[buff_idx++] = p.x;
    buff[buff_idx++] = p.y;
    buff[buff_idx++] = p.z;
    buff[buff_idx++] = p.u;
    buff[buff_idx++] = p.v;
}
My vertex and fragment shaders are simple...
// Vertex shader
#version 450 core
in vec3 vert;
in vec2 texcoord;
uniform mat4 matrix;
out FS_INPUTS {
    vec2 i_texcoord;
} tex_data;

void main(void) {
    tex_data.i_texcoord = texcoord;
    gl_Position = matrix * vec4(vert, 1.0);
}

// Fragment shader
#version 450 core
in FS_INPUTS {
    vec2 i_texcoord;
};
layout (binding=1) uniform sampler2D tex_id;
out vec4 color;

void main(void) {
    color = texture(tex_id, i_texcoord);
}
My draw command is:
glDrawArrays(GL_TRIANGLES, 0, num_elements/5);
Thanks!
First of all, this code does some funny extra work:
Point Sphere::generate_sphere_coord(int stack, int slice) const
{
    Point p;
    p.y = 2.0 * stack / _stacks - 1.0;
    float r = sqrt(1 - p.y * p.y);
    float angle = 2.0 * M_PI * slice / _slices;
    p.x = r * sin(angle);
    p.z = r * cos(angle);
    p.u = (0.5 + ( (atan2(p.z, p.x)) / (2 * M_PI) ));
    p.v = (0.5 + ( (asin(p.y)) / M_PI ));
    return p;
}
Calling cos and sin just to call atan2 on the results is extra work in the best case, and in the worst case you might get the wrong branch cuts. You can calculate p.u directly from slice and _slices instead.
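For example (a sketch of the direct computation):

    p.u = (float)slice / (float)_slices;  // no sin/cos/atan2 round trip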
The Seam
You are going to have a seam in your sphere. This is normal, most models will have a seam (or many seams) in their UV maps somewhere. The problem is that the UV coordinates should still increase linearly next to the seam. For example, think about a loop of vertices that go around the globe's equator. At some point, the UV coordinates will wrap around, something like this:
0.8, 0.9, 0.0, 0.1, 0.2
The problem is that you'll get four quads, but one of them will be wrong:
quad 1: u = 0.8 ... 0.9
quad 2: u = 0.9 ... 0.0 <<----
quad 3: u = 0.0 ... 0.1
quad 4: u = 0.1 ... 0.2
Look at how messed up quad 2 is. You will instead have to generate the following data:
quad 1: u = 0.8 ... 0.9
quad 2: u = 0.9 ... 1.0
quad 3: u = 0.0 ... 0.1
quad 4: u = 0.1 ... 0.2
A Fixed Version
Here is a sketch of a fixed version.
namespace {

const float pi = std::atan(1.0f) * 4.0f;

// Generate point from the u, v coordinates in (0..1, 0..1)
Point sphere_point(float u, float v) {
    float r = std::sin(pi * v);
    return Point{
        r * std::cos(2.0f * pi * u),
        r * std::sin(2.0f * pi * u),
        std::cos(pi * v),
        u,
        v
    };
}

} // namespace
// Create array of points with quads that make a unit sphere.
std::vector<Point> sphere(int hSize, int vSize) {
    std::vector<Point> pt;
    for (int i = 0; i < hSize; i++) {
        for (int j = 0; j < vSize; j++) {
            float u0 = (float)i / (float)hSize;
            float u1 = (float)(i + 1) / (float)hSize;
            float v0 = (float)j / (float)vSize;
            float v1 = (float)(j + 1) / (float)vSize;
            // Create quad as two triangles.
            pt.push_back(sphere_point(u0, v0));
            pt.push_back(sphere_point(u1, v0));
            pt.push_back(sphere_point(u0, v1));
            pt.push_back(sphere_point(u0, v1));
            pt.push_back(sphere_point(u1, v0));
            pt.push_back(sphere_point(u1, v1));
        }
    }
    return pt;
}
Note that there is some easy optimization you could do, and also note that due to rounding errors, the seam might not line up quite correctly. These are left as an exercise for the reader.
More Problems
Even with the fixed version, you will likely see artifacts at the poles. This is because the screen space texture coordinate derivatives have a singularity at the poles.
The recommended way to fix this is to use a cube map texture instead. This will also greatly simplify the sphere geometry data, since you can completely eliminate the UV coordinates and you won't have a seam.
As a kludge, you can enable anisotropic filtering instead.
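For reference, enabling it looks roughly like this (a sketch; requires the EXT_texture_filter_anisotropic extension, which is core in OpenGL 4.6):

    GLfloat maxAniso = 1.0f;
    glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY_EXT, &maxAniso);  // query the supported maximum
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAX_ANISOTROPY_EXT, maxAniso);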

OpenGL Cone Function Not Drawing Correctly

I found an example online that shows how to draw a cone in OpenGL. It was written in C++, so I translated it to C#. Here is the new code:
public void RenderCone(Vector3 d, Vector3 a, float h, float rd, int n)
{
    Vector3 c = new Vector3(a + (-d * h));
    Vector3 e0 = Perp(d);
    Vector3 e1 = Vector3.Cross(e0, d);
    float angInc = (float)(360.0 / n * GrimoireMath.Pi / 180);

    // calculate points around directrix
    List<Vector3> pts = new List<Vector3>();
    for (int i = 0; i < n; ++i)
    {
        float rad = angInc * i;
        Vector3 p = c + ((e0 * (float)Math.Cos(rad) + e1 * (float)Math.Sin(rad)) * rd);
        pts.Add(p);
    }

    // draw cone top
    GL.Begin(PrimitiveType.TriangleFan);
    GL.Vertex3(a);
    for (int i = 0; i < n; ++i)
    {
        GL.Vertex3(pts[i]);
    }
    GL.End();

    // draw cone bottom
    GL.Begin(PrimitiveType.TriangleFan);
    GL.Vertex3(c);
    for (int i = n - 1; i >= 0; --i)
    {
        GL.Vertex3(pts[i]);
    }
    GL.End();
}

public Vector3 Perp(Vector3 v)
{
    float min = Math.Abs(v.X);
    Vector3 cardinalAxis = new Vector3(1, 0, 0);
    if (Math.Abs(v.Y) < min)
    {
        min = Math.Abs(v.Y);
        cardinalAxis = new Vector3(0, 1, 0);
    }
    if (Math.Abs(v.Z) < min)
    {
        cardinalAxis = new Vector3(0, 0, 1);
    }
    return Vector3.Cross(v, cardinalAxis);
}
I think I am using the parameters correctly (the page isn't exactly coherent in terms of actual function usage). Here is the legend that the original creator supplied:
But when I enter in the following as parameters:
RenderCone(new Vector3(0.0f, 1.0f, 0.0f), new Vector3(1.0f, 1.0f, 1.0f), 20.0f, 10.0f, 8);
I receive this (wireframe enabled):
As you can see, I'm missing a slice, either at the very beginning, or the very end. Does anyone know what's wrong with this method? Or what I could be doing wrong that would cause an incomplete cone?
// draw cone bottom
GL.Begin(PrimitiveType.TriangleFan);
GL.Vertex3(c);
for (int i = n - 1; i >= 0; --i)
{
    GL.Vertex3(pts[i]);
}
GL.End();
That connects all vertices to each other and to the center, but one connection is missing: nothing specifies the connection from the first vertex to the last. Adding GL.Vertex3(pts[n - 1]); after the loop would add the missing connection.
The solution was actually extremely simple: I needed to increase the number of slices by 1. Pretty special if you ask me.
public void RenderCone(Vector3 baseToApexLength, Vector3 apexLocation, float height, float radius, int slices)
{
    Vector3 c = new Vector3(apexLocation + (-baseToApexLength * height));
    Vector3 e0 = Perpendicular(baseToApexLength);
    Vector3 e1 = Vector3.Cross(e0, baseToApexLength);
    float angInc = (float)(360.0 / slices * GrimoireMath.Pi / 180);
    slices++; // this was the fix for my problem.

    /**
     * Compute the Vertices around the Directrix
     */
    Vector3[] vertices = new Vector3[slices];
    for (int i = 0; i < vertices.Length; ++i)
    {
        float rad = angInc * i;
        Vector3 p = c + ((e0 * (float)Math.Cos(rad) + e1 * (float)Math.Sin(rad)) * radius);
        vertices[i] = p;
    }

    /**
     * Draw the Top of the Cone.
     */
    GL.Begin(PrimitiveType.TriangleFan);
    GL.Vertex3(apexLocation);
    for (int i = 0; i < slices; ++i)
    {
        GL.Vertex3(vertices[i]);
    }
    GL.End();

    /**
     * Draw the Base of the Cone.
     */
    GL.Begin(PrimitiveType.TriangleFan);
    GL.Vertex3(c);
    for (int i = slices - 1; i >= 0; --i)
    {
        GL.Vertex3(vertices[i]);
    }
    GL.End();
}

Lighting in my ray tracer is working oddly

This is a ray tracer code I'm working on. When I tested it out, everything seemed to be working fine until I started changing the camera(view point) position. Here are some of the results:
campos(-60, 100, -30), lightPos(-70, 100, -30)
The light on the floor is cut off somehow.
campos(60, 100, -30), lightPos(-70, 100, -30)
This one shows the same problem.
campos(60, 30, -30), lightPos(-70, 100, -30)
The light in this screenshot seems to have two light sources although there's only one active at the moment.
campos(-70, 100, -30), lightPos(-70, 100, -30)
The final position is the last position set in the code below. It's at the exact same location as the light source.
Why is the light creating shadows like that?
main.cpp
#include <iostream>
#include <algorithm>
#include <GL/glut.h>
#include <GL/gl.h>
#include <GL/glu.h>
#include <math.h>
#include <vector>
#include "Vector.h"
#include "Ray.h"
#include "Camera.h"
#include "Color.h"
#include "Light.h"
#include "Sphere.h"
#include "Plane.h"

#define PI 3.141592653589793
#define INFINITY 1e6
#define FOV 60
#define KA 0.2
#define KD 0.5
#define KS 5

VECTOR X = { 1,0,0 };
VECTOR Y = { 0,1,0 };
VECTOR Z = { 0,0,1 };
VECTOR O = { 0,0,0 };

Color white(1, 1, 1);
Color black(0, 0, 0);
Color greenC(0.5, 1, 0.5);
Color gray(0.5, 0.5, 0.5);
Color maroon(0.5, 0.25, 0.25);

unsigned int width = 640;
unsigned int height = 480;

using namespace std;
Color trace(Ray &ray, vector<Object*> objects, vector<Light*> lights)
{
    float hit = INFINITY;
    float closest = INFINITY;
    Object* objectHit = NULL;
    for (int i = 0; i < objects.size(); i++)
    {
        if (objects.at(i)->intersect(ray, hit))
        {
            if (hit < closest)
            {
                closest = hit;
                objectHit = objects.at(i);
            }
        }
    }
    if (objectHit)
    {
        VECTOR hitP = ray.getOrigin() + ray.getDirction() * closest;
        VECTOR hitN = objectHit->getNormal(hitP);
        Color finalColor = objectHit->getColor() * objectHit->getKa(); // ambient color
        for (int i = 0; i < lights.size(); i++)
        {
            VECTOR lightDir = lights.at(i)->getPos() - hitP;
            float lightDist = lightDir.Magnitude();
            lightDir.Normalize();
            bool shadow = false;
            Ray shadowRay(hitP, lightDir);
            float angle = max(hitN.DotProduct(lightDir), 0.0f);
            for (int j = 0; j < objects.size() && shadow == false; j++)
            {
                float p;
                if (objects.at(j)->intersect(shadowRay, p) && objectHit != objects.at(j))
                {
                    VECTOR objectDist = hitP + lightDir * p;
                    if (objectDist.Magnitude() <= lightDist)
                        shadow = true;
                }
            }
            if (!shadow)
            {
                VECTOR h = ray.getDirction() + lightDir;
                h.Normalize();
                Color diffuse = lights.at(i)->getCol() * objectHit->getKd() * angle;
                Color specular = lights.at(i)->getCol() * angle * pow(max(hitN.DotProduct(h), 0.0f), objectHit->getKs());
                finalColor = finalColor + diffuse + specular;
            }
        }
        return finalColor.clip();
    }
    else return black;
}
void Render(void)
{
    glClear(GL_COLOR_BUFFER_BIT);
    vector<Object*> objects;
    int radius = 20;
    Sphere sphere(O, radius, greenC, KA, KD, KS);
    Plane plane(Y, VECTOR(0, -radius, 0), maroon, 0.3, 0.5, 0.01);
    objects.push_back(&sphere);
    objects.push_back(&plane);
    float xx, yy;
    Color *image = new Color[width*height];
    Color *pixel = image;
    VECTOR lightPos(-70, 100, -30);
    Light light(lightPos, gray);
    //Light l2(VECTOR(10, 10, -20), white);
    vector<Light*> lights;
    lights.push_back(&light);
    //lights.push_back(&l2);
    VECTOR camPos(-70, 100, -30);
    VECTOR lookat(0, 0, 0);
    VECTOR diff(camPos.getX() - lookat.getX(), camPos.getY() - lookat.getY(), camPos.getZ() - lookat.getZ());
    VECTOR camDir = diff;
    camDir.Normalize();
    VECTOR camRight = Y.CrossProduct(camDir);
    camRight.Normalize();
    VECTOR camUp = camRight.CrossProduct(camDir).Negative();
    Camera cam(camPos, camDir, camRight, camUp);
    for (int x = 0; x < width; x++)
    {
        for (int y = 0; y < height; y++)
        {
            xx = -(double)(width / 2) + x + 0.5;
            yy = -(double)(height / 2) + y + 0.5;
            VECTOR ray_d = camRight*xx + camUp*yy + camDir;
            VECTOR ray_origin = camPos;
            VECTOR ray_dir = ray_d - ray_origin;
            ray_dir.Normalize();
            Ray ray(ray_origin, ray_dir);
            *(pixel++) = trace(ray, objects, lights);
            float red = image[x*height + y].getRed();
            float green = image[x*height + y].getGreen();
            float blue = image[x*height + y].getBlue();
            glColor3f(red, green, blue);
            glBegin(GL_POINTS);
            glVertex2i(x, y);
            glEnd();
        }
    }
    glutSwapBuffers();
}
struct RGBtype
{
    float r, g, b;
};

int main(int argc, char ** argv)
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
    glutInitWindowSize(width, height);
    glutCreateWindow("Ray tracer");
    glClearColor(0.0, 0.0, 0.0, 0.0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(0.0, width, 0.0, height);
    glutDisplayFunc(Render);
    glutMainLoop();
    return 0;
}
Vector.h
#ifndef _VECTOR_H_
#define _VECTOR_H_
#include <math.h>

class VECTOR
{
private:
    float x, y, z;
public:
    VECTOR();
    ~VECTOR();
    VECTOR(float, float, float);
    float getX() { return x; }
    float getY() { return y; }
    float getZ() { return z; }
    float Magnitude();
    VECTOR CrossProduct(VECTOR);
    float DotProduct(VECTOR);
    VECTOR vecAdd(VECTOR);
    VECTOR vecMul(float);
    void Normalize();
    VECTOR Negative();
    VECTOR operator - (VECTOR);
    VECTOR operator + (VECTOR);
    VECTOR operator * (float);
};

VECTOR VECTOR::operator-(VECTOR v)
{
    VECTOR result = (*this);
    result.x -= v.getX();
    result.y -= v.getY();
    result.z -= v.getZ();
    return result;
}

VECTOR VECTOR::operator+(VECTOR v)
{
    VECTOR result = (*this);
    result.x += v.getX();
    result.y += v.getY();
    result.z += v.getZ();
    return result;
}

VECTOR VECTOR::operator*(float f)
{
    return VECTOR(x*f, y*f, z*f);
}

VECTOR::VECTOR()
{
    x = y = z = 0;
}

VECTOR::~VECTOR(){}

VECTOR::VECTOR(float xPos, float yPos, float zPos)
{
    x = xPos;
    y = yPos;
    z = zPos;
}

float VECTOR::Magnitude()
{
    return sqrt(x * x + y * y + z * z);
}

float VECTOR::DotProduct(VECTOR v)
{
    return (x * v.getX() + y * v.getY() + z * v.getZ());
}

VECTOR VECTOR::CrossProduct(VECTOR v)
{
    VECTOR result;
    result.x = y * v.getZ() - z * v.getY();
    result.y = z * v.getX() - x * v.getZ();
    result.z = x * v.getY() - y * v.getX();
    return result;
}

VECTOR VECTOR::vecAdd(VECTOR v)
{
    return VECTOR(x + v.getX(), y + v.getY(), z + v.getZ());
}

VECTOR VECTOR::vecMul(float f)
{
    return VECTOR(x*f, y*f, z*f);
}

void VECTOR::Normalize()
{
    float w = Magnitude();
    if (w < 0.00001) return;
    x /= w;
    y /= w;
    z /= w;
}

VECTOR VECTOR::Negative()
{
    return VECTOR(-x, -y, -z);
}

#endif // !_VECTOR_H_
Ray.h
#ifndef _RAY_H_
#define _RAY_H_
#include "Vector.h"

class Ray
{
private:
    VECTOR origin, direction;
public:
    Ray();
    ~Ray();
    Ray(VECTOR, VECTOR);
    VECTOR getOrigin() { return origin; }
    VECTOR getDirction() { return direction; }
};

Ray::Ray()
{
    origin = VECTOR{ 0,0,0 };
    direction = VECTOR{ 1,0,0 };
}

Ray::~Ray() {}

Ray::Ray(VECTOR o, VECTOR d)
{
    origin = o;
    direction = d;
}

#endif // !_RAY_H_
Camera.h
#ifndef _CAMERA_H_
#define _CAMERA_H_
#include "Vector.h"

class Camera
{
private:
    VECTOR camPos, camDir, camRight, camUp;
public:
    Camera();
    ~Camera();
    Camera(VECTOR, VECTOR, VECTOR, VECTOR);
    VECTOR getCamPos() { return camPos; }
    VECTOR getCamDir() { return camDir; }
    VECTOR getCamRight() { return camRight; }
    VECTOR getcamUp() { return camUp; }
};

Camera::Camera()
{
    camPos = VECTOR{ 0,0,0 };
    camDir = VECTOR{ 0,0,1 };
    camRight = VECTOR{ 0,0,0 };
    camUp = VECTOR{ 0,0,0 };
}

Camera::~Camera() {}

Camera::Camera(VECTOR pos, VECTOR dir, VECTOR right, VECTOR down)
{
    camPos = pos;
    camDir = dir;
    camRight = right;
    camUp = down;
}

#endif // !_CAMERA_H_
Color.h
#ifndef _COLOR_H_
#define _COLOR_H_
#include "Vector.h"

class Color
{
private:
    double red, green, blue;
public:
    Color();
    ~Color();
    Color(double, double, double);
    double getRed() { return red; }
    double getGreen() { return green; }
    double getBlue() { return blue; }
    void setRed(double r) { red = r; }
    void setGreen(double g) { green = g; }
    void setBlue(double b) { blue = b; }
    double brightness() { return (red + green + blue) / 3; }
    Color average(Color c) { return Color((red + c.getRed()) / 2, (green + c.getGreen()) / 2, (blue + c.getBlue()) / 2); }
    Color operator * (double);
    Color operator + (Color);
    Color operator * (Color);
    Color clip()
    {
        float sum = red + green + blue;
        float extra = sum - 3;
        if (extra > 0)
        {
            red = red + extra * (red / sum);
            green = red + extra * (green / sum);
            blue = red + extra * (blue / sum);
        }
        if (red > 1) { red = 1; }
        if (green > 1) { green = 1; }
        if (blue > 1) { blue = 1; }
        if (red < 0) { red = 0; }
        if (green < 0) { green = 0; }
        if (blue < 0) { blue = 0; }
        return Color(red, green, blue);
    }
};

Color Color::operator * (double c) { return Color(red*c, green*c, blue*c); }
Color Color::operator + (Color c) { return Color(red + c.getRed(), green + c.getGreen(), blue + c.getBlue()); }
Color Color::operator * (Color c) { return Color(red*c.getRed(), green*c.getGreen(), blue*c.getBlue()); }

Color::Color()
{
    red = green = blue = 1;
}

Color::~Color() {}

Color::Color(double r, double g, double b)
{
    red = r;
    green = g;
    blue = b;
}

#endif // !_COLOR_H_
Light.h
#ifndef _LIGHT_H_
#define _LIGHT_H_
#include "Vector.h"
#include "Color.h"

class Light
{
private:
    VECTOR position;
    Color color;
public:
    Light();
    ~Light();
    Light(VECTOR, Color);
    virtual VECTOR getPos() { return position; }
    virtual Color getCol() { return color; }
};

Light::Light()
{
    position = VECTOR(0, 0, 0);
    color = Color(1, 1, 1);
}

Light::~Light() {}

Light::Light(VECTOR v, Color c)
{
    position = v;
    color = c;
}

#endif // !_LIGHT_H_
Sphere.h
#ifndef _SPHERE_H_
#define _SPHERE_H_
#include <math.h>
#include "Vector.h"
#include "Color.h"
#include "Object.h"

class Sphere : public Object
{
private:
    VECTOR center;
    float radius;
    Color color;
    float ka, kd, ks;
public:
    Sphere();
    ~Sphere();
    Sphere(VECTOR, float, Color, float, float, float);
    float getKa() { return ka; }
    float getKd() { return kd; }
    float getKs() { return ks; }
    VECTOR getCenter() { return center; }
    float getRadius() { return radius; }
    Color getColor() { return color; }
    VECTOR getNormal(VECTOR &v)
    {
        VECTOR a = v - center;
        a.Normalize();
        return a;
    }
    bool intersect(Ray &ray, float &t)
    {
        float t0, t1;
        float radius2 = radius * radius; // radius squared
        VECTOR line = center - ray.getOrigin(); // vector from ray origin to sphere center
        float ray_t = line.DotProduct(ray.getDirction()); // projection of that vector onto the ray
        if (ray_t < 0)
            return false;
        float d2 = line.DotProduct(line) - (ray_t * ray_t); // d2 + t2 = line2 by the Pythagorean theorem
        if (d2 > radius2) // if larger than the radius squared, the ray doesn't intersect the sphere
            return false;
        float ray_i = sqrt(radius2 - d2); // half-length of the chord through the sphere
        t0 = ray_t - ray_i; // first intersection along the ray
        t1 = ray_t + ray_i; // second intersection
        if (t0 > t1)
        {
            float tmp = t0;
            t0 = t1;
            t1 = tmp;
        }
        if (t0 < 0)
        {
            t0 = t1;
            t = t0;
            if (t0 < 0) return false;
        }
        t = t0;
        return true;
    }
};

Sphere::Sphere()
{
    center = VECTOR(0, 0, 0);
    radius = 1;
    color = Color(1, 1, 1);
}

Sphere::~Sphere() {}

Sphere::Sphere(VECTOR v, float r, Color c, float a, float d, float s)
{
    center = v;
    radius = r;
    color = c;
    ka = a;
    kd = d;
    ks = s;
}

#endif // !_SPHERE_H_
Object.h
#ifndef _OBJECT_H_
#define _OBJECT_H_
#include "Ray.h"
#include "Vector.h"
#include "Color.h"

class Object
{
private:
    VECTOR center;
    Color color;
    float ka, kd, ks;
public:
    Object();
    ~Object();
    virtual float getKa() = 0;
    virtual float getKd() = 0;
    virtual float getKs() = 0;
    virtual VECTOR getCenter() = 0;
    virtual Color getColor() = 0;
    virtual VECTOR getNormal(VECTOR&) = 0;
    virtual bool intersect(Ray&, float&) = 0;
};

Object::Object(){}
Object::~Object() {}

#endif // !_OBJECT_H_
Plane.h
#ifndef _PLANE_H_
#define _PLANE_H_
#include <math.h>
#include <vector>
#include "Vector.h"
#include "Color.h"
#include "Object.h"
using namespace std;

class Plane : public Object
{
private:
    VECTOR normal;
    float width, height;
    vector<VECTOR> vertice;
    VECTOR center; // used in the equation (p - p0) * n = 0, where p is the point of intersection and p0 is the center
    Color color;
    float ka, kd, ks;
public:
    Plane();
    ~Plane();
    Plane(VECTOR, VECTOR, Color, float, float, float);
    float getKa() { return ka; }
    float getKd() { return kd; }
    float getKs() { return ks; }
    VECTOR getNormal(VECTOR &point)
    {
        VECTOR a = normal;
        a.Normalize();
        return a;
    }
    VECTOR getCenter() { return center; }
    Color getColor() { return color; }
    bool intersect(Ray &ray, float &t)
    {
        VECTOR rayDir = ray.getDirction();
        float ray_f = rayDir.DotProduct(normal);
        // ray doesn't intersect, or is parallel to, the plane
        if (fabs(ray_f) < 1e-6)
            return false;
        else
        {
            VECTOR tmp = (center - ray.getOrigin());
            float plane_f = normal.DotProduct(tmp);
            // returns t in the parametric ray equation: point = origin + t*direction
            t = plane_f / ray_f;
            return (t >= 0);
        }
    }
};

Plane::Plane()
{
    normal = VECTOR(0, 1, 0);
    center = VECTOR(0, 0, 0);
    color = Color(0.5, 0.5, 0.5);
    width = 500;
    height = 500;
}

Plane::~Plane() {}

Plane::Plane(VECTOR v, VECTOR o, Color c, float a, float d, float s)
{
    normal = v;
    center = o;
    color = c;
    ka = a;
    kd = d;
    ks = s;
}

#endif // !_PLANE_H_
This is an awful lot of code, so I can only guess at what the problem is. Since the problem appears in the non-shadowed part of the image, it lies in the calculation of either the diffuse or specular colors (or both). You could comment out each one individually to see which gives you the expected coloring, then diagnose the problem further from there.
The problem may be in your normalize method, which does not normalize really short vectors. This would cause the specular color to be off.
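For example, a sketch of a Normalize that only skips a genuinely zero vector (the current 0.00001 cutoff silently leaves short vectors unnormalized):

    void VECTOR::Normalize()
    {
        float w = Magnitude();
        if (w == 0.0f) return; // only a true zero vector is left untouched
        x /= w;
        y /= w;
        z /= w;
    }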