Ray tracing: making a plane for a sphere - C++

I am working on an assignment where I need to ray trace a sphere together with a plane (floor). I have the sphere working, but I am having trouble with the plane. I use the ray-plane intersection formula
t = -((o - p) · n) / (d · n),
which comes from substituting the ray x = o + t·d into the plane equation (x - p) · n = 0; here o is the ray origin, d the ray direction, p a point on the plane, and n the plane normal. I have this in Plane.h; however, when I run my code I get errors from Ray.h. Could someone explain what I'm doing wrong? Any help would be appreciated.
Plane.h
#include "..\..\raytrace\Ray.h"
class Plane
{
using Colour = cv::Vec3b; // RGB Value
private:
Vec3 normal_;
Vec3 distance_;
Colour color_;
public:
Plane();
Plane(Vec3 norm, Vec3 dis, Colour color) : normal_(norm), distance_(dis), color_(color) {}
Vec3 norm() const {
return normal_;
}
Vec3 dis() const {
return distance_;
}
Colour color() const {
return color_;
}
float findIntersection(Ray ray) {
Vec3 rayDirection = ray.mPosition();
float denominator = rayDirection.dot(normal_);
if (denominator == 0) {
return false;
}
else {
//mPosition() is origin in Ray.h
float t = -(((ray.mPosition() - distance_)).dot(normal_)) / denominator;
}
}
};
Ray.h
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <cmath>
#include "Image.h"
// Data types
typedef float Scalar; //**custom datatype: Scalar is float
typedef Eigen::Matrix<Scalar, 3, 1> Vec3; //***Vec3 is a custom datatype (specific kind)
typedef Eigen::Matrix<Scalar, 2, 1> Vec2;
typedef unsigned char uchar;
class Ray
{
private:
Vec3 mPosition_; //point
public:
Ray() {}
// constructor: when we construct a ray we set mPosition_
Ray(Vec3 mPosition) : mPosition_(mPosition) {
//
}
float t;
Vec3 mPosition() const {
return mPosition_;
}
public:
inline Vec3 generateRay(Vec3 const& pt) {
Vec3 origin = mPosition_;
Vec3 direction = pt - mPosition_; // d = s -e, pt is pixel Position
direction.normalize();
return pt + t * direction;
}
};
main.cpp
#include <cmath>
#include "Image.h"
#include "Ray.h"
#include "../build/raytrace/Plane.h"
//Color functions
using Colour = cv::Vec3b; // RGB Value
//Colour is a cv::Vec3b datatype; use Colour instead of Vec3b. It holds 3 channel values between 0 and 255.
Colour red() { return Colour(255, 0, 0); }
Colour green() { return Colour(0, 255,0); }
Colour blue() { return Colour(0, 0, 255); }
Colour white() { return Colour(255, 255, 255); }
Colour black() { return Colour(0, 0, 0); }
//bounding the channel wise pixel color between 0 to 255
//bounding the color value, if a value is beyond 255 clamp it to 255, and any value below 0 clamp to 0.
uchar Clamp(int color)
{
if (color < 0) return 0;
if (color >= 255) return 255;
return color;
}
int main(int, char**){
//Create an image object with 500 x 500 resolution.
Image image = Image(500, 500);
//Coordinates of image rectangle
Vec3 llc = Vec3(-1, -1, -1); //**llc - lower left corner
Vec3 urc = Vec3(1, 1, -1); //**urc - upper right corner
int width = urc(0) - llc(0);
int height = urc(1) - llc(1);
Vec2 pixelUV = Vec2((float)width / image.cols, (float)height / image.rows);
/// TODO: define camera position (view point), sphere center, sphere radius (Weightage: 5%)
Vec3 CameraPoint = Vec3(0, 0, 0); //**it is the origin
Vec3 SphereCenter = Vec3(0, 0, -5); //**it is the Sphere Position
float SphereRadius = 2.0;
Vec3 LightSource = Vec3(2.0, 0.0, 3.0); //**
Vec3 ambient = Vec3(0, 0, 0.5); //**
Vec3 diffuse = Vec3(224, 180, 255); //** 0, 255, 100 - green
Vec3 Origin = CameraPoint;
//end
for (int row = 0; row < image.rows; ++row) {
for (int col = 0; col < image.cols; ++col) {
//TODO: Build primary rays
//Find the pixel position (PixelPos) for each row and col and then construct the vector PixelPos-Origin
Vec3 pixelPos = Vec3(llc(0) + pixelUV(0) * (col + 0.5), llc(1) + pixelUV(1) * (row + 0.5), -1);
//create a ray object
Ray r; //**
//Vec3 rayDir = pixelPos - Origin; //**direction of the ray
Vec3 rayDir = r.generateRay(pixelPos); //**pixelPos-Origin
rayDir.normalize(); //**normalize the ray direction vector
//Ray-sphere intersection...(refer to the lecture slides and Section 4.4.1 of the textbook)
float a = rayDir.dot(rayDir);
Vec3 s0_r0 = Origin - SphereCenter; //***s0_r0 - sphere center - ray origin
float b = 2.0 * rayDir.dot(s0_r0);
float c = s0_r0.dot(s0_r0) - pow(SphereRadius, 2);
//compute the discriminant
float discriminant = pow(b, 2) - 4 * a * c;
//if the discriminant is greater than zero
if(discriminant > 0){
//find roots t1 and t2
float t1 = (-b - sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
float t2 = (-b + sqrt((pow(b, 2)) - 4.0 * a * c)) / (2.0 * a); //**
//determine which one is the real intersection point
float t;
//Sphere s;
if (t1 < t2 && (t1 > 0 && t2 > 0)) {
t = t1;
//} //should this be after the if-statement below, so that it uses t = t1 and not just float t.????
if (t > 0) {
//Shade the pixel, normal is Intersection - SphereCenter, LightVector is LightSource- Intersection, make sure to normalize the vectors
Vec3 Intersection = Origin + (t * rayDir);
Vec3 Normal = Intersection - SphereCenter; //** normalize
Normal.normalize(); //**
Vec3 LightVector = LightSource - Intersection; //**normalize
LightVector.normalize(); //**
float diffuseTerm = LightVector.dot(Normal);
if (diffuseTerm < 0) diffuseTerm = 0;
Colour colour(0, 0, 0); //The ambient base
colour[0] = Clamp(ambient[0] + diffuse[0] * diffuseTerm);
colour[1] = Clamp(ambient[1] + diffuse[1] * diffuseTerm);
colour[2] = Clamp(ambient[2] + diffuse[2] * diffuseTerm);
image(row, col) = colour;
}
}//
else {
image(row, col) = black();
}
} else {
//No intersection, discriminant < 0
image(row, col) = red(); //**makes blue background colour
}
////**Plane intersection
//create a plane object
Plane plane(Vec3(-5, 0, -4), Vec3(0, 0, -1), black());
//Plane plane;
////if ray hits plane -> color black
//if (plane.findIntersection(rayDir) == 1) {
// image(row, col) = black();
//}
//else {
// image(row, col) = white();
//}
}
}
/// Required outputs: (1) Ray traced image of a sphere (2) Ray traced image when the camera is placed inside the sphere (complete black)
image.save("./result.png");
image.display();
return EXIT_SUCCESS;
}
Errors
[screenshot of compiler errors reporting a class redefinition]

#include is a shockingly simple directive. It literally just copy-pastes the content of the file.
main.cpp includes both Ray.h and Plane.h, and Plane.h includes Ray.h, so Ray.h ends up being included twice. That's why the compiler is complaining about a "class redefinition".
You can add #pragma once at the top of all your header files to let the compiler know to skip a file that has already been included.
N.B. #pragma once is not officially part of the language, but it is supported by all compilers and has a few small advantages over the alternative.
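For illustration, a minimal sketch of both options at the top of Plane.h (pick one; the include guard is the portable alternative the standard itself describes):

// Plane.h -- option 1: pragma
#pragma once // the compiler skips this file if it has already been included

// Plane.h -- option 2: a classic include guard
#ifndef PLANE_H
#define PLANE_H
// ...contents of the header...
#endif // PLANE_H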

Related

Raytracing program fails to detect intersections in c++

I am working on a simple raytracer in c++. I am currently implementing an intersection function but have encountered some issues.
For some reason, the collision detection only works for a tiny rectangle in my image. In the image below you can see that it draws the room quite fine for a small part of the screen but fails to do so for the rest of the scene. Only a small section gets drawn correctly.
Why does my intersection detection not work? I have included the code for the intersection and draw function below.
LoadTestModel(m_Model);
m_Light.position = glm::vec3(0.0f, -1.0f, 0.0);
m_Light.color = glm::vec3(0.f, 0.f, 0.f);
m_Light.ambient = glm::vec3(0.5f, 0.5f, 0.5f);
m_Camera.position = glm::vec3(0.0, 0.0, -2.0);
m_Camera.yaw = 0.0f;
}
void Lab2Scene::Draw(Window& window)
{
if (!m_RenderNext) return;
m_RenderNext = false;
for (uint32_t y = 0; y < window.GetHeight(); ++y)
{
for (uint32_t x = 0; x < window.GetWidth(); ++x)
{
Ray ray = {};
glm::vec3 d(x - (window.GetWidth() / 2), y - (window.GetHeight() / 2), (window.GetHeight() / 2));
d = glm::normalize(d);
ray.direction = d * m_Camera.GetRotationY();
ray.start = m_Camera.position;
// Find the closest intersection of the casted ray.
Intersection nearest_intersection = {};
if (ClosestIntersection(ray, m_Model, nearest_intersection))
{
//window.PutPixel(x, y, glm::vec3(1.f, 0.f, 0.f));
window.PutPixel(x, y, DirectLight(m_Light, nearest_intersection, m_Model) + m_Model[nearest_intersection.triangleIndex].color * m_Light.ambient); // DirectLight(m_Light, intersection, m_Model)
}
else
{
window.PutPixel(x, y, m_Light.color);
}
}
}
}
bool Lab2Scene::ClosestIntersection(const Ray& ray, const std::vector<Triangle>& triangles, Intersection& intersection)
{
float m = std::numeric_limits<float>::max();
intersection.distance = m;
bool inters = false;
for (int i = 0; i < triangles.size(); ++i) {
float dot = glm::dot(ray.direction, triangles[i].normal);
if (dot != 0) {
using glm::vec3;
using glm::mat3;
vec3 v0 = triangles[i].v0;
vec3 v1 = triangles[i].v1;
vec3 v2 = triangles[i].v2;
vec3 e1 = v1 - v0;
vec3 e2 = v2 - v0;
vec3 b = ray.start - v0;
mat3 A(-ray.direction, e1, e2);
vec3 x = glm::inverse(A) * b;
if (x[1] >= 0 && x[2] >= 0 && x[1] + x[2] <= 1 && x[0] >= 0) {
vec3 intersect = ray.start + (x[0] * ray.direction);
if (glm::distance(ray.start, intersect) <= intersection.distance) {
intersection.position = intersect;
intersection.distance = glm::distance(ray.start, intersect);
intersection.triangleIndex = i;
inters = true;
}
}
}
}
return inters;
}

How to know IF a line segment intersects a triangle in 3d space?

I have a triangle defined by 3 points in 3d space. I also have a line segment defined by 2 points in 3d space. I want to know if they intersect. I don't really need to know the point of intersection.
I don't know any calculus but I know some trig. I know some about matrices but I understand vectors well (3d vectors specifically). Please keep it simple.
Can you walk me through the example problem:
triangle:
a: -4, 3, 0
b: 4, 3, 0
c: -3, -5, 4
line segment:
d: 1, -2, 0
e: -2, 6, 2
EDIT:
I am going to use this in a c++ physics engine.
One answer involved tetrahedron volume calculation from 4 vertices. Please provide formula or show it in code.
UPDATE:
As meowgoesthedog pointed out, I could try to use the Moller-Trumbore intersection algorithm. See my answer below for an alternate solution.
Here is one way to solve your problem. Compute the volumes of the tetrahedra Td = (a,b,c,d) and Te = (a,b,c,e). If either volume is zero, then one endpoint of the segment de lies on the plane containing triangle (a,b,c). If the volumes of Td and Te have the same sign, then de lies strictly to one side of that plane, and there is no intersection. If Td and Te have opposite signs, then de crosses the plane containing (a,b,c).
From there, there are several strategies. One is to compute the point p where de crosses that plane, then project down to 2D and solve the point-in-triangle problem in 2D.
Another route is to compute the volumes of the tetrahedra (a,b,d,e), (b,c,d,e), and (c,a,d,e). Only if all three have the same sign does de intersect the triangle (a,b,c).
How to compute the volume of a tetrahedron from its corner coordinates is all over the web, and also in Computational Geometry in C.
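For reference, here is a minimal sketch of that computation in C++ (the signed volume is one sixth of a scalar triple product; only its sign matters for the tests above, and the struct and helper names are just for illustration):

#include <cstdio>

struct V3 { double x, y, z; };

static V3 sub(V3 a, V3 b) { return { a.x - b.x, a.y - b.y, a.z - b.z }; }
static V3 cross(V3 a, V3 b) { return { a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x }; }
static double dot(V3 a, V3 b) { return a.x*b.x + a.y*b.y + a.z*b.z; }

// signed volume of the tetrahedron (a, b, c, d): positive on one side of the
// plane through (a, b, c), negative on the other, zero if d lies on that plane
static double signedVolume(V3 a, V3 b, V3 c, V3 d) {
    return dot(sub(a, d), cross(sub(b, d), sub(c, d))) / 6.0;
}

int main() {
    // the example triangle and segment from the question
    V3 a{-4, 3, 0}, b{4, 3, 0}, c{-3, -5, 4}, d{1, -2, 0}, e{-2, 6, 2};
    std::printf("Td = %g, Te = %g\n",
                signedVolume(a, b, c, d),   // about -26.67
                signedVolume(a, b, c, e));  // about  37.33
    // opposite signs, so segment de crosses the plane of triangle (a, b, c)
    return 0;
}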
I implemented the great answer that Joseph gave in Python and thought I would share. The function takes a set of line segments and triangles and computes, for each line segment, whether it intersects any of the given triangles.
The first input to the function is a 2xSx3 array of line segments, where the first index specifies the start or end point of the segment, the second index refers to the s-th line segment, and the third index points to the x, y, z coordinate of the line segment point.
The second input is a 3xTx3 array of triangle vertices, where the first index specifies one of the three vertices (which don't have to be in any particular order), the second index refers to the t-th triangle, and the third index points to the x, y, z coordinates of the triangle vertex.
The output of this function is a binary array of size S which tells you whether the s-th line segment intersects any of the triangles given. If you want to know which triangles the segments intersect, just remove the summation in the last line of the function.
import numpy as np

def signedVolume(a, b, c, d):
    """Computes the signed volume of a series of tetrahedrons defined by the vertices in
    a, b, c and d. The output is an SxT array which gives the signed volume of the tetrahedron
    defined by the line segment 's' and two vertices of the triangle 't'."""
    return np.sum((a-d)*np.cross(b-d, c-d), axis=2)

def segmentsIntersectTriangles(s, t):
    """For each line segment in 's', this function computes whether it intersects any of the
    triangles given in 't'."""
    # compute the normals to each triangle
    normals = np.cross(t[2]-t[0], t[2]-t[1])
    normals /= np.linalg.norm(normals, axis=1)[:, np.newaxis]

    # get sign of each segment endpoint, if the sign changes then we know this segment crosses the
    # plane which contains a triangle. If the value is zero the endpoint of the segment lies on the
    # plane.
    # s[i][:, np.newaxis] - t[j] -> S x T x 3 array
    sign1 = np.sign(np.sum(normals*(s[0][:, np.newaxis] - t[2]), axis=2))  # S x T
    sign2 = np.sign(np.sum(normals*(s[1][:, np.newaxis] - t[2]), axis=2))  # S x T

    # determine segments which cross the plane of a triangle. 1 if the sign of the end points of s is
    # different AND one of end points of s is not a vertex of t
    cross = (sign1 != sign2)*(sign1 != 0)*(sign2 != 0)  # S x T

    # get signed volumes
    v1 = np.sign(signedVolume(t[0], t[1], s[0][:, np.newaxis], s[1][:, np.newaxis]))  # S x T
    v2 = np.sign(signedVolume(t[1], t[2], s[0][:, np.newaxis], s[1][:, np.newaxis]))  # S x T
    v3 = np.sign(signedVolume(t[2], t[0], s[0][:, np.newaxis], s[1][:, np.newaxis]))  # S x T

    same_volume = np.logical_and((v1 == v2), (v2 == v3))  # 1 if s and t have same sign in v1, v2 and v3

    return (np.sum(cross*same_volume, axis=1) > 0)
Thanks for the help! This is an alternate solution. The question was for C++, and as meowgoesthedog pointed out, I could try to use the Moller-Trumbore intersection algorithm. This is what I came up with:
#include <math.h>
class vec3 {
public:
    float x, y, z;

    vec3() {}
    vec3(float v) : x(v), y(v), z(v) {}
    vec3(float x, float y, float z) : x(x), y(y), z(z) {}

    float dot(const vec3& b) const { return x * b.x + y * b.y + z * b.z; }

    vec3 cross(const vec3& b) const {
        return vec3(y * b.z - z * b.y,
                    z * b.x - x * b.z,
                    x * b.y - y * b.x);
    }

    vec3 normalize() const {
        const float s = 1.0f / sqrtf(x * x + y * y + z * z);
        return vec3(x * s, y * s, z * s);
    }

    vec3 operator+(const vec3& b) const { return vec3(x + b.x, y + b.y, z + b.z); }
    vec3& operator+=(const vec3& b) { *this = *this + b; return *this; }

    vec3 operator-(const vec3& b) const { return vec3(x - b.x, y - b.y, z - b.z); }
    vec3& operator-=(const vec3& b) { *this = *this - b; return *this; }

    vec3 operator*(const vec3& b) const { return vec3(x * b.x, y * b.y, z * b.z); }
    vec3& operator*=(const vec3& b) { *this = *this * b; return *this; }
    vec3 operator*(float b) const { return vec3(x * b, y * b, z * b); }
    vec3& operator*=(float b) { *this = *this * b; return *this; }

    vec3 operator/(const vec3& b) const { return vec3(x / b.x, y / b.y, z / b.z); }
    vec3& operator/=(const vec3& b) { *this = *this / b; return *this; }
    vec3 operator/(float b) const { return vec3(x / b, y / b, z / b); }
    vec3& operator/=(float b) { *this = *this / b; return *this; }
};
#define EPSILON 0.000001f
bool lineSegIntersectTri(
vec3 line[2],
vec3 tri[3],
vec3 * point
) {
vec3 e0 = tri[1] - tri[0];
vec3 e1 = tri[2] - tri[0];
vec3 dir = line[1] - line[0];
vec3 dir_norm = dir.normalize();
vec3 h = dir_norm.cross(e1);
const float a = e0.dot(h);
if (a > -EPSILON && a < EPSILON) {
return false;
}
vec3 s = line[0] - tri[0];
const float f = 1.0f / a;
const float u = f * s.dot(h);
if (u < 0.0f || u > 1.0f) {
return false;
}
vec3 q = s.cross(e0);
const float v = f * dir_norm.dot(q);
if (v < 0.0f || u + v > 1.0f) {
return false;
}
const float t = f * e1.dot(q);
if (t > EPSILON && t < sqrtf(dir.dot(dir))) { // segment intersection
if (point) {
*point = line[0] + dir_norm * t;
}
return true;
}
return false;
}
For running a few tests:
#include <stdio.h>
const char * boolStr(bool b) {
if (b) {
return "true";
}
return "false";
}
int main() {
vec3 tri[3] = {
{ -1.0f, -1.0f, 0.0f },
{ 1.0f, -1.0f, 0.0f },
{ 1.0f, 1.0f, 0.0f },
};
vec3 line0[2] = { // should intersect
{ 0.5f, -0.5f, -1.0f },
{ 0.5f, -0.5f, 1.0f },
};
vec3 line1[2] = { // should not intersect
{ -0.5f, 0.5f, -1.0f },
{ -0.5f, 0.5f, 1.0f },
};
printf(
"line0 intersects? : %s\r\n"
"line1 intersects? : %s\r\n",
boolStr(lineSegIntersectTri(line0, tri, NULL)),
boolStr(lineSegIntersectTri(line1, tri, NULL))
);
return 0;
}
C# version:
public class AlgoritmoMollerTrumbore
{
private const double EPSILON = 0.0000001;
public static bool lineIntersectTriangle(Point3D[] line,
Point3D[] triangle,
out Point3D outIntersectionPoint)
{
outIntersectionPoint = new Point3D(0, 0, 0);
Point3D rayOrigin = line[0];
Vector3D rayVector = Point3D.Subtract(line[1], line[0]);
double segmentLength = rayVector.Length; // remember the segment length before normalizing
rayVector.Normalize();
Point3D vertex0 = triangle[0];
Point3D vertex1 = triangle[1];
Point3D vertex2 = triangle[2];
Vector3D edge1 = Point3D.Subtract(vertex1, vertex0);
Vector3D edge2 = Point3D.Subtract(vertex2, vertex0);
Vector3D h = Vector3D.CrossProduct(rayVector, edge2);
double a = Vector3D.DotProduct(edge1, h);
if (a > -EPSILON && a < EPSILON)
{
return false; // This ray is parallel to this triangle.
}
double f = 1.0 / a;
Vector3D s = Point3D.Subtract(rayOrigin, vertex0);
double u = f * (Vector3D.DotProduct(s, h));
if (u < 0.0 || u > 1.0)
{
return false;
}
Vector3D q = Vector3D.CrossProduct(s, edge1);
double v = f * Vector3D.DotProduct(rayVector, q);
if (v < 0.0 || u + v > 1.0)
{
return false;
}
// At this stage we can compute t to find out where the intersection point is on the line.
double t = f * Vector3D.DotProduct(edge2, q);
if (t > EPSILON && t < segmentLength) // segment intersection
{
outIntersectionPoint = rayOrigin + rayVector * t;
return true;
}
else // This means that there is a line intersection but not a ray intersection.
{
return false;
}
}
}

Billboarding C++

I got code from my teacher that currently shows a 3D globe and a 2D particle system. The camera moves around in circles. The particle system is supposed to face the camera.
According to my lecture notes, I have to multiply the billboard with the inverse of the camera's view matrix. I would love to try that, but I'm having trouble using the variable that holds the view matrix.
#include "pch.h"
#include <Kore/Application.h>
#include <Kore/IO/FileReader.h>
#include <Kore/Math/Core.h>
#include <Kore/Math/Random.h>
#include <Kore/System.h>
#include <Kore/Input/Keyboard.h>
#include <Kore/Input/Mouse.h>
#include <Kore/Audio/Mixer.h>
#include <Kore/Graphics/Image.h>
#include <Kore/Graphics/Graphics.h>
#include <Kore/Log.h>
#include "ObjLoader.h"
#include "Collision.h"
#include "PhysicsWorld.h"
#include "PhysicsObject.h"
using namespace Kore;
// A simple particle implementation
class Particle {
public:
VertexBuffer* vb;
IndexBuffer* ib;
mat4 M;
// The current position
vec3 position;
// The current velocity
vec3 velocity;
// The remaining time to live
float timeToLive;
// The total time to live
float totalTimeToLive;
// Is the particle dead (= ready to be re-spawned?)
bool dead;
void init(const VertexStructure& structure) {
vb = new VertexBuffer(4, structure,0);
float* vertices = vb->lock();
SetVertex(vertices, 0, -1, -1, 0, 0, 0);
SetVertex(vertices, 1, -1, 1, 0, 0, 1);
SetVertex(vertices, 2, 1, 1, 0, 1, 1);
SetVertex(vertices, 3, 1, -1, 0, 1, 0);
vb->unlock();
// Set index buffer
ib = new IndexBuffer(6);
int* indices = ib->lock();
indices[0] = 0;
indices[1] = 1;
indices[2] = 2;
indices[3] = 0;
indices[4] = 2;
indices[5] = 3;
ib->unlock();
dead = true;
}
void Emit(vec3 pos, vec3 velocity, float timeToLive) {
position = pos;
this->velocity = velocity;
dead = false;
this->timeToLive = timeToLive;
totalTimeToLive = timeToLive;
}
Particle() {
}
void SetVertex(float* vertices, int index, float x, float y, float z, float u, float v) {
vertices[index* 8 + 0] = x;
vertices[index*8 + 1] = y;
vertices[index*8 + 2] = z;
vertices[index*8 + 3] = u;
vertices[index*8 + 4] = v;
vertices[index*8 + 5] = 0.0f;
vertices[index*8 + 6] = 0.0f;
vertices[index*8 + 7] = -1.0f;
}
void render(TextureUnit tex, Texture* image) {
Graphics::setTexture(tex, image);
Graphics::setVertexBuffer(*vb);
Graphics::setIndexBuffer(*ib);
Graphics::drawIndexedVertices();
}
void Integrate(float deltaTime) {
timeToLive -= deltaTime;
if (timeToLive < 0.0f) {
dead = true;
}
// Note: We are using no forces or gravity at the moment.
position += velocity * deltaTime;
// Build the matrix
M = mat4::Translation(position.x(), position.y(), position.z()) * mat4::Scale(0.2f, 0.2f, 0.2f);
}
};
class ParticleSystem {
public:
// The center of the particle system
vec3 position;
// The minimum coordinates of the emitter box
vec3 emitMin;
// The maximal coordinates of the emitter box
vec3 emitMax;
// The list of particles
Particle* particles;
// The number of particles
int numParticles;
// The spawn rate
float spawnRate;
// When should the next particle be spawned?
float nextSpawn;
ParticleSystem(int maxParticles, const VertexStructure& structure ) {
particles = new Particle[maxParticles];
numParticles = maxParticles;
for (int i = 0; i < maxParticles; i++) {
particles[i].init(structure);
}
spawnRate = 0.05f;
nextSpawn = spawnRate;
position = vec3(0.5f, 1.3f, 0.5f);
float b = 0.1f;
emitMin = position + vec3(-b, -b, -b);
emitMax = position + vec3(b, b, b);
}
void update(float deltaTime) {
// Do we need to spawn a particle?
nextSpawn -= deltaTime;
bool spawnParticle = false;
if (nextSpawn < 0) {
spawnParticle = true;
nextSpawn = spawnRate;
}
for (int i = 0; i < numParticles; i++) {
if (particles[i].dead) {
if (spawnParticle) {
EmitParticle(i);
spawnParticle = false;
}
}
particles[i].Integrate(deltaTime);
}
}
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V) {
Graphics::setBlendingMode(BlendingOperation::SourceAlpha, BlendingOperation::InverseSourceAlpha);
Graphics::setRenderState(RenderState::DepthWrite, false);
/************************************************************************/
/* Exercise 7 1.1 */
/************************************************************************/
/* Change the matrix V in such a way that the billboards are oriented towards the camera */
/************************************************************************/
/* Exercise 7 1.2 */
/************************************************************************/
/* Animate using at least one new control parameter */
for (int i = 0; i < numParticles; i++) {
// Skip dead particles
if (particles[i].dead) continue;
Graphics::setMatrix(mLocation, particles[i].M * V);
particles[i].render(tex, image);
}
Graphics::setRenderState(RenderState::DepthWrite, true);
}
float getRandom(float minValue, float maxValue) {
int randMax = 1000000;
int randInt = Random::get(0, randMax);
float r = (float) randInt / (float) randMax;
return minValue + r * (maxValue - minValue);
}
void EmitParticle(int index) {
// Calculate a random position inside the box
float x = getRandom(emitMin.x(), emitMax.x());
float y = getRandom(emitMin.y(), emitMax.y());
float z = getRandom(emitMin.z(), emitMax.z());
vec3 pos;
pos.set(x, y, z);
vec3 velocity(0, 0.3f, 0);
particles[index].Emit(pos, velocity, 3.0f);
}
};
namespace {
const int width = 1024;
const int height = 768;
double startTime;
Shader* vertexShader;
Shader* fragmentShader;
Program* program;
float angle = 0.0f;
// null terminated array of MeshObject pointers
MeshObject* objects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// null terminated array of PhysicsObject pointers
PhysicsObject* physicsObjects[] = { nullptr, nullptr, nullptr, nullptr, nullptr, nullptr };
// The view projection matrix aka the camera
mat4 P;
mat4 View;
mat4 PV;
vec3 cameraPosition;
MeshObject* sphere;
PhysicsObject* po;
PhysicsWorld physics;
// uniform locations - add more as you see fit
TextureUnit tex;
ConstantLocation pvLocation;
ConstantLocation mLocation;
ConstantLocation tintLocation;
Texture* particleImage;
ParticleSystem* particleSystem;
double lastTime;
void update() {
double t = System::time() - startTime;
double deltaT = t - lastTime;
//Kore::log(Info, "%f\n", deltaT);
lastTime = t;
Kore::Audio::update();
Graphics::begin();
Graphics::clear(Graphics::ClearColorFlag | Graphics::ClearDepthFlag, 0xff9999FF, 1000.0f);
Graphics::setFloat4(tintLocation, vec4(1, 1, 1, 1));
program->set();
angle += 0.3f * deltaT;
float x = 0 + 3 * Kore::cos(angle);
float z = 0 + 3 * Kore::sin(angle);
cameraPosition.set(x, 2, z);
//PV = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100) * mat4::lookAt(vec3(0, 2, -3), vec3(0, 2, 0), vec3(0, 1, 0));
P = mat4::Perspective(60, (float)width / (float)height, 0.1f, 100);
View = mat4::lookAt(vec3(x, 2, z), vec3(0, 2, 0), vec3(0, 1, 0));
PV = P * View;
Graphics::setMatrix(pvLocation, PV);
// iterate the MeshObjects
MeshObject** current = &objects[0];
while (*current != nullptr) {
// set the model matrix
Graphics::setMatrix(mLocation, (*current)->M);
(*current)->render(tex);
++current;
}
// Update the physics
physics.Update(deltaT);
PhysicsObject** currentP = &physics.physicsObjects[0];
while (*currentP != nullptr) {
(*currentP)->UpdateMatrix();
Graphics::setMatrix(mLocation, (*currentP)->Mesh->M);
(*currentP)->Mesh->render(tex);
++currentP;
}
particleSystem->update(deltaT);
particleSystem->render(tex, particleImage, mLocation, View);
Graphics::end();
Graphics::swapBuffers();
}
void SpawnSphere(vec3 Position, vec3 Velocity) {
PhysicsObject* po = new PhysicsObject();
po->SetPosition(Position);
po->Velocity = Velocity;
po->Collider.radius = 0.2f;
po->Mass = 5;
po->Mesh = sphere;
// The impulse should carry the object forward
// Use the inverse of the view matrix
po->ApplyImpulse(Velocity);
physics.AddObject(po);
}
void keyDown(KeyCode code, wchar_t character) {
if (code == Key_Space) {
// The impulse should carry the object forward
// Use the inverse of the view matrix
vec4 impulse(0, 0.4, 2, 0);
mat4 viewI = View;
viewI.Invert();
impulse = viewI * impulse;
vec3 impulse3(impulse.x(), impulse.y(), impulse.z());
SpawnSphere(cameraPosition + impulse3 *0.2f, impulse3);
}
}
void keyUp(KeyCode code, wchar_t character) {
if (code == Key_Left) {
// ...
}
}
void mouseMove(int x, int y, int movementX, int movementY) {
}
void mousePress(int button, int x, int y) {
}
void mouseRelease(int button, int x, int y) {
}
void init() {
FileReader vs("shader.vert");
FileReader fs("shader.frag");
vertexShader = new Shader(vs.readAll(), vs.size(), VertexShader);
fragmentShader = new Shader(fs.readAll(), fs.size(), FragmentShader);
// This defines the structure of your Vertex Buffer
VertexStructure structure;
structure.add("pos", Float3VertexData);
structure.add("tex", Float2VertexData);
structure.add("nor", Float3VertexData);
program = new Program;
program->setVertexShader(vertexShader);
program->setFragmentShader(fragmentShader);
program->link(structure);
tex = program->getTextureUnit("tex");
pvLocation = program->getConstantLocation("PV");
mLocation = program->getConstantLocation("M");
tintLocation = program->getConstantLocation("tint");
objects[0] = new MeshObject("Base.obj", "Level/basicTiles6x6.png", structure);
objects[0]->M = mat4::Translation(0.0f, 1.0f, 0.0f);
sphere = new MeshObject("ball_at_origin.obj", "Level/unshaded.png", structure);
SpawnSphere(vec3(0, 2, 0), vec3(0, 0, 0));
Graphics::setRenderState(DepthTest, true);
Graphics::setRenderState(DepthTestCompare, ZCompareLess);
Graphics::setTextureAddressing(tex, U, Repeat);
Graphics::setTextureAddressing(tex, V, Repeat);
particleImage = new Texture("SuperParticle.png", true);
particleSystem = new ParticleSystem(100, structure);
}
}
int kore(int argc, char** argv) {
Application* app = new Application(argc, argv, width, height, 0, false, "Exercise7");
init();
app->setCallback(update);
startTime = System::time();
lastTime = 0.0f;
Kore::Mixer::init();
Kore::Audio::init();
Keyboard::the()->KeyDown = keyDown;
Keyboard::the()->KeyUp = keyUp;
Mouse::the()->Move = mouseMove;
Mouse::the()->Press = mousePress;
Mouse::the()->Release = mouseRelease;
app->start();
delete app;
return 0;
}
There's a comment where the teacher wants us to add the code.
The variable for the view matrix, View, is in an unnamed namespace. I've only ever used namespaces from libraries, but this one doesn't have a name. So how do I use it?
The comment says that we should use the matrix V. So do I just add V = inverse view matrix * model matrix to the code, and that removes the rotation?
I'm sorry for the stupid questions; it's supposed to be a class for beginners, but it's really anything but. The lecture notes aren't very helpful when it comes to the programming part, and I only found tutorials for OpenGL or Unity or DirectX, and we're not using any of them.
Please help me, I need to hand this in by Saturday morning, and I've already spent the last two days trying out code and have nothing so far!
You can find the whole thing here: https://github.com/TUDGameTechnology/Exercise7
You don't have to do anything special to access an unnamed namespace. This thread explains more.
You are most probably trying to reference View within methods that cannot see your namespace because of the order in which they are defined in your file.
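For illustration, here is a minimal standalone example of how an unnamed namespace behaves (the names are made up): everything declared inside it is directly visible to code further down in the same .cpp file, with no qualifier needed.

#include <cstdio>

namespace { // unnamed namespace: its contents get internal linkage
    int counter = 0; // visible below without any qualifier
}

void tick() {
    ++counter; // just use the name directly
}

int main() {
    tick();
    tick();
    std::printf("counter = %d\n", counter); // prints: counter = 2
}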
This line in your update method:
particleSystem->render(tex, particleImage, mLocation, View);
is already passing View into the render method.
void render(TextureUnit tex, Texture* image, ConstantLocation mLocation, mat4 V)
That means that in this case the parameter mat4 V is your camera's view matrix.
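For illustration, here is a small self-contained sketch (plain C++, not the Kore API) of what "multiply with the inverse of the view matrix" means for a billboard. For a rigid view matrix, the inverse of its upper-left 3x3 rotation block is just its transpose; baking that transpose into the particle's model matrix between the translation and the scale cancels the camera rotation, so the quad always faces the camera (M_billboard = T * R_view^T * S). The Mat4 struct and its column-major layout are assumptions of the sketch; adapt it to Kore's mat4 conventions where the code currently sets particles[i].M * V.

#include <cstring>

struct Mat4 { float m[16]; }; // column-major, as in OpenGL

// hypothetical helper: build a billboard model matrix from the view matrix,
// a particle position and a uniform scale
Mat4 billboardMatrix(const Mat4& view, float px, float py, float pz, float scale) {
    Mat4 r;
    std::memset(r.m, 0, sizeof(r.m));
    // transpose of the view's upper-left 3x3 = inverse of the camera rotation
    for (int col = 0; col < 3; ++col)
        for (int row = 0; row < 3; ++row)
            r.m[col * 4 + row] = view.m[row * 4 + col] * scale; // fold in the uniform scale
    r.m[12] = px; r.m[13] = py; r.m[14] = pz; // particle position (translation column)
    r.m[15] = 1.0f;
    return r;
}

// usage sketch: build one such matrix per particle each frame and upload it
// where the model matrix is set today (Kore's matrix conventions may differ)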

Separating Axis Theorem - false positives on vertical axis

I've implemented collision detection using SAT, and I get false positives if another bbox is above or below the bounding box.
I'm projecting the corners of both bounding boxes onto each (for now, axis-aligned) face normal of the box.
The collision data returned is correct (depth, step height), but the fact that it reports a collision for every object that is above or below the bounding box is of course wrong.
I check my results against a simple aabb collision check which works.
Here is the code:
//------------------------------
BoundingBoxIntersectionResult BoundingBox::IntersectionSAT(BoundingBox & other)
{
// check shortest edge
f32 distances[6] = {
(other.mMaxVec.x - this->mMinVec.x),
(this->mMaxVec.x - other.mMinVec.x),
(other.mMaxVec.y - this->mMinVec.y),
(this->mMaxVec.y - other.mMinVec.y),
(other.mMaxVec.z - this->mMinVec.z),
(this->mMaxVec.z - other.mMinVec.z)
};
i32 faceIndex = 0;
Vec3 faceNormal;
f32 collisionDepth = 0.0f;
// for each face normal, get the minimum and maximum extents of the projection
// of all corner points of both shapes.
// if they don't overlap, there is no intersection
// check each normal
for (ui32 i = 0; i < 6; i++)
{
// CornerPointsWorld represents the world space corner positions
SATReturn ret = this->SATTest(this->mNormals[i], this->mCornerPointsWorld);
SATReturn ret2 = this->SATTest(this->mNormals[i], other.mCornerPointsWorld);
float d1 = ret.minAlong - ret2.maxAlong;
float d2 = ret2.minAlong - ret.maxAlong;
if ((d1 > 0.0f) || (d2> 0.0f))
{
// return a false collision event, because we found a separating axis
return { false, 0.0f, 0.0f, Vec3(), BBOX_SIDE_LEFT };
}
}
// check each normal of the other bbox
for (ui32 i = 0; i < 6; i++)
{
SATReturn ret = this->SATTest(other.mNormals[i], this->mCornerPointsWorld);
SATReturn ret2 = this->SATTest(other.mNormals[i], other.mCornerPointsWorld);
float d1 = ret.minAlong - ret2.maxAlong;
float d2 = ret2.minAlong - ret.maxAlong;
if ((d1 > 0.0f) || (d2> 0.0f))
{
// return a false collision event, because we found a separating axis
return { false, 0.0f, 0.0f, Vec3(), BBOX_SIDE_LEFT };
}
// get collision data
if (i == 0 || distances[i] < collisionDepth)
{
faceIndex = i;
faceNormal = this->mNormals[i];
collisionDepth = distances[i];
}
}
// get step height needed to climb this object
f32 stepHeight = other.mMaxVec.y - this->mMinVec.y;
return { true, collisionDepth, stepHeight, faceNormal, BoundingBoxSide(faceIndex) };
}
//------------------------------
SATReturn BoundingBox::SATTest(Vec3& normal, Vector<Vec3>& corners)
{
SATReturn ret;
ret.maxAlong = MIN_FLOAT;
ret.minAlong = MAX_FLOAT;
// for each point
for (ui32 i = 0; i < corners.GetSize(); i++)
{
f32 dot = Vec3::Dot(corners[i], normal);
if (dot < ret.minAlong) ret.minAlong = dot;
if (dot > ret.maxAlong) ret.maxAlong = dot;
}
return ret;
}
where face normals are defined as:
Vec3 mNormals[6] =
{
Vec3(1, 0, 0), // left
Vec3(-1, 0, 0), // right
Vec3(0, 1, 0), // up
Vec3(0, -1, 0), // down
Vec3(0, 0, 1), //back
Vec3(0, 0, -1), // front
};
I've added a screenshot to display the problem.
So the problem is: it returns a false positive for every object below or above the bounding box.

C++ Raytracer with opengl display skew in specific resolution

I have a ray tracer (from www.scratchapixel.com) that I use to write an image to memory, which I then display all at once using OpenGL (GLUT). I use the width and height to divide the screen into an OpenGL point for every pixel. It kind of works.
My problem is that my width has to be between 500 and 799. It cannot be <= 499 or >= 800, which doesn't make sense to me. The image becomes skewed. I have tried it on 2 computers with the same result.
799x480 (renders correctly)
800x480 (skewed)
Here's the full code:
#define _USE_MATH_DEFINES
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
// OpenGl
#include "GL/glut.h"
GLuint width = 799, height = 480;
GLdouble width_step = 2.0f / width, height_step = 2.0f / height;
const int MAX_RAY_DEPTH = 3;
const double INFINITY = HUGE_VAL;
template<typename T>
class Vec3
{
public:
T x, y, z;
// Vector constructors.
Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
Vec3(T xx) : x(xx), y(xx), z(xx) {}
Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
// Vector normalisation.
Vec3& normalize()
{
T nor = x * x + y * y + z * z;
if (nor > 1) {
T invNor = 1 / sqrt(nor);
x *= invNor, y *= invNor, z *= invNor;
}
return *this;
}
// Vector operators.
Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
};
template<typename T>
class Sphere
{
public:
// Sphere variables.
Vec3<T> center; /// position of the sphere
T radius, radius2; /// sphere radius and radius^2
Vec3<T> surfaceColor, emissionColor; /// surface color and emission (light)
T transparency, reflection; /// surface transparency and reflectivity
// Sphere constructor.
// position(c), radius(r), surface color(sc), reflectivity(refl), transparency(transp), emission color(ec)
Sphere(const Vec3<T> &c, const T &r, const Vec3<T> &sc,
const T &refl = 0, const T &transp = 0, const Vec3<T> &ec = 0) :
center(c), radius(r), surfaceColor(sc), reflection(refl),
transparency(transp), emissionColor(ec), radius2(r * r)
{}
// compute a ray-sphere intersection using the geometric solution
bool intersect(const Vec3<T> &rayorig, const Vec3<T> &raydir, T *t0 = NULL, T *t1 = NULL) const
{
// we start with a vector (l) from the ray origin (rayorig) to the center of the current sphere.
Vec3<T> l = center - rayorig;
// tca is the length of l projected onto the normalized raydir;
// together with l it forms a right-angle triangle.
T tca = l.dot(raydir);
// if tca is < 0, the raydir is going in the opposite direction. No need to go further. Return false.
if (tca < 0) return false;
// if we keep on into the code, it's because the raydir may still hit the sphere.
// l.dot(l) gives us the squared length of the l vector. Then we use Pythagoras' theorem:
// subtract tca squared (tca * tca) and we get the squared distance from the center of the sphere (d2).
T d2 = l.dot(l) - (tca * tca);
// if this distance to the center (d2) is greater than the radius to the power of 2 (radius2), the raydir direction is missing the sphere.
// No need to go further. Return false.
if (d2 > radius2) return false;
// Pythagoras' theorem again: radius2 is the hypotenuse squared and d2 is one of the sides squared. Subtraction gives the third side squared.
// Using sqrt, we obtain the length thc. thc is how deep tca goes into the sphere.
T thc = sqrt(radius2 - d2);
if (t0 != NULL && t1 != NULL) {
// subtract thc from tca and you get the length from the ray origin to the surface hit point of the sphere.
*t0 = tca - thc;
// add thc to tca and you get the length from the ray origin to the surface hit point of the back side of the sphere.
*t1 = tca + thc;
}
// There is an intersection with a sphere; t0 and t1 hold the surface distance values. Return true.
return true;
}
};
std::vector<Sphere<double> *> spheres;
// function to mix 2 variables of type T.
template<typename T>
T mix(const T &a, const T &b, const T &mix)
{
return b * mix + a * (T(1) - mix);
}
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object, it
// returns the color of the object at the intersection point, otherwise it returns
// the background color.
template<typename T>
Vec3<T> trace(const Vec3<T> &rayorig, const Vec3<T> &raydir,
const std::vector<Sphere<T> *> &spheres, const int &depth)
{
T tnear = INFINITY;
const Sphere<T> *sphere = NULL;
// Try to find intersection of this raydir with the spheres in the scene
for (unsigned i = 0; i < spheres.size(); ++i) {
T t0 = INFINITY, t1 = INFINITY;
if (spheres[i]->intersect(rayorig, raydir, &t0, &t1)) {
// is the rayorig inside the sphere (t0 < 0)? If so, use the second hit (t0 = t1)
if (t0 < 0) t0 = t1;
// tnear is the last sphere intersection (or infinity). Is t0 in front of tnear?
if (t0 < tnear) {
// if so, update tnear to this closer t0 and update the closest sphere
tnear = t0;
sphere = spheres[i];
}
}
}
// At this moment in the program, we have the closest sphere (sphere) and the closest hit position (tnear)
// For this pixel, if there's no intersection with a sphere, return a Vec3 with the background color.
if (!sphere) return Vec3<T>(.5); // Grey background color.
// if we keep on with the code, it is because we had an intersection with at least one sphere.
Vec3<T> surfaceColor = 0; // initialisation of the color of the ray/surface of the object intersected by the ray.
Vec3<T> phit = rayorig + (raydir * tnear); // point of intersection.
Vec3<T> nhit = phit - sphere->center; // normal at the intersection point.
// if the normal and the view direction are not opposite to each other,
// reverse the normal direction.
if (raydir.dot(nhit) > 0) nhit = -nhit;
nhit.normalize(); // normalize normal direction
// The angle between raydir and the normal at point hit (not used).
//T s_angle = acos(raydir.dot(nhit)) / ( sqrt(raydir.dot(raydir)) * sqrt(nhit.dot(nhit)));
//T s_incidence = sin(s_angle);
T bias = 1e-5; // add some bias to the point from which we will be tracing
// Do we have transparency or reflection?
if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
T IdotN = raydir.dot(nhit); // raydir.normal
// I and N are not pointing in the same direction, so take the invert.
T facingratio = std::max(T(0), -IdotN);
// change the mix value between reflection and refraction to tweak the effect (fresnel effect)
T fresneleffect = mix<T>(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (not need to normalize because all vectors
// are already normalized)
Vec3<T> refldir = raydir - nhit * 2 * raydir.dot(nhit);
Vec3<T> reflection = trace(phit + (nhit * bias), refldir, spheres, depth + 1);
Vec3<T> refraction = 0;
// if the sphere is also transparent compute refraction ray (transmission)
if (sphere->transparency) {
T ior = 1.2, eta = 1 / ior;
T k = 1 - eta * eta * (1 - IdotN * IdotN);
Vec3<T> refrdir = raydir * eta - nhit * (eta * IdotN + sqrt(k));
refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
}
// the result is a mix of reflection and refraction (if the sphere is transparent)
surfaceColor = (reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
}
else {
// it's a diffuse object, no need to raytrace any further
// Look at all spheres to find lights
double shadow = 1.0;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i]->emissionColor.x > 0) {
// this is a light
Vec3<T> transmission = 1.0;
Vec3<T> lightDirection = spheres[i]->center - phit;
lightDirection.normalize();
T light_angle = (acos(raydir.dot(lightDirection)) / ( sqrt(raydir.dot(raydir)) * sqrt(lightDirection.dot(lightDirection))));
T light_incidence = sin(light_angle);
for (unsigned j = 0; j < spheres.size(); ++j) {
if (i != j) {
T t0, t1;
// Does the ray from point hit to the light intersect an object?
// If so, calculate the shadow.
if (spheres[j]->intersect(phit + (nhit * bias), lightDirection, &t0, &t1)) {
shadow = std::max(0.0, shadow - (1.0 - spheres[j]->transparency));
transmission = transmission * spheres[j]->surfaceColor * shadow;
//break;
}
}
}
// For each light found, we add light transmission to the pixel.
surfaceColor += sphere->surfaceColor * transmission *
std::max(T(0), nhit.dot(lightDirection)) * spheres[i]->emissionColor;
}
}
}
return surfaceColor + sphere->emissionColor;
}
// Main rendering function. We compute a camera ray for each pixel of the image,
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
//********************************** OPEN GL ***********************************************
void init(void)
{
/* Select clearing (background) color */
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_FLAT);
/* Initialize viewing values */
//glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,width,0,height);
}
void advanceDisplay(void)
{
cam_pos.z = cam_pos.z - 2;
glutPostRedisplay();
}
void backDisplay(void)
{
cam_pos.z = cam_pos.z + 2;
glutPostRedisplay();
}
void resetDisplay(void)
{
Vec3<double> new_cam_pos;
new_cam_pos = cam_pos;
cam_pos = new_cam_pos;
glutPostRedisplay();
}
void reshape(int w, int h)
{
glLoadIdentity();
gluOrtho2D(0,width,0,height);
glLoadIdentity();
}
void mouse(int button, int state, int x, int y)
{
switch (button)
{
case GLUT_LEFT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(advanceDisplay);
}
break;
case GLUT_MIDDLE_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(resetDisplay);
}
break;
case GLUT_RIGHT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(backDisplay);
}
break;
}
}
void display(void)
{
int i;
float x, y;
/* clear all pixels */
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
render<double>(spheres); // Creates the image and put it to memory in image[].
i=0;
glBegin(GL_POINTS);
for(y=1.0f;y>-1.0;y=y-height_step)
{
for(x=1.0f;x>-1.0;x=x-width_step)
{
glColor3f((std::min(double(1), image[i].x)),
(std::min(double(1), image[i].y)),
(std::min(double(1), image[i].z)));
glVertex2f(x, y);
if(i < width*height)
{
i = i + 1;
}
}
}
glEnd();
glPopMatrix();
glutSwapBuffers();
}
int main(int argc, char **argv)
{
// position, radius, surface color, reflectivity, transparency, emission color
spheres.push_back(new Sphere<double>(Vec3<double>(0, -10004, -20), 10000, Vec3<double>(0.2), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(3, 0, -15), 2, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(1, -1, -18), 1, Vec3<double>(1.0, 1.0, 1.0), 0.9, 0.9));
spheres.push_back(new Sphere<double>(Vec3<double>(-2, 2, -15), 2, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 3, -18), 1, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 0, -25), 1, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(-1, 1, -25), 2, Vec3<double>(1.0, 1.0, 1.0), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(2, 2, -25), 1, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(5, 3, -25), 2, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
// light
spheres.push_back(new Sphere<double>(Vec3<double>(-10, 20, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(3)));
spheres.push_back(new Sphere<double>(Vec3<double>(0, 10, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(1)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(width, height);
glutInitWindowPosition(10,10);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMouseFunc(mouse);
glutMainLoop();
delete [] image;
while (!spheres.empty()) {
Sphere<double> *sph = spheres.back();
spheres.pop_back();
delete sph;
}
return 0;
}
This is where the image is written to memory:
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
This is where I read it back and write it to each OpenGL point:
void display(void)
{
int i;
float x, y;
/* clear all pixels */
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
render<double>(spheres); // Creates the image and put it to memory in image[].
i=0;
glBegin(GL_POINTS);
for(y=1.0f;y>-1.0;y=y-height_step)
{
for(x=1.0f;x>-1.0;x=x-width_step)
{
glColor3f((std::min(double(1), image[i].x)),
(std::min(double(1), image[i].y)),
(std::min(double(1), image[i].z)));
glVertex2f(x, y);
if(i < width*height)
{
i = i + 1;
}
}
}
glEnd();
glPopMatrix();
glutSwapBuffers();
}
I have no idea what is causing this. Is it a bad design? An OpenGL display mode? I don't know.
Is it a bad design?
Yes! Upload your rendered scene to a texture and then render a quad with it:
// g++ -O3 main.cpp -lglut -lGL -lGLU
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
// OpenGl
#include "GL/glut.h"
GLuint width = 800, height = 480;
GLdouble width_step = 2.0f / width;
GLdouble height_step = 2.0f / height;
const int MAX_RAY_DEPTH = 3;
template<typename T>
class Vec3
{
public:
T x, y, z;
// Vector constructors.
Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
Vec3(T xx) : x(xx), y(xx), z(xx) {}
Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
// Vector normalisation.
Vec3& normalize()
{
T nor = x * x + y * y + z * z;
if (nor > 1) {
T invNor = 1 / sqrt(nor);
x *= invNor, y *= invNor, z *= invNor;
}
return *this;
}
// Vector operators.
Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
};
template<typename T>
class Sphere
{
public:
// Sphere variables.
Vec3<T> center; /// position of the sphere
T radius, radius2; /// sphere radius and radius^2
Vec3<T> surfaceColor, emissionColor; /// surface color and emission (light)
T transparency, reflection; /// surface transparency and reflectivity
// Sphere constructor.
// position(c), radius(r), surface color(sc), reflectivity(refl), transparency(transp), emission color(ec)
Sphere(const Vec3<T> &c, const T &r, const Vec3<T> &sc,
const T &refl = 0, const T &transp = 0, const Vec3<T> &ec = 0) :
center(c), radius(r), surfaceColor(sc), reflection(refl),
transparency(transp), emissionColor(ec), radius2(r * r)
{}
// compute a ray-sphere intersection using the geometric solution
bool intersect(const Vec3<T> &rayorig, const Vec3<T> &raydir, T *t0 = NULL, T *t1 = NULL) const
{
// we start with a vector (l) from the ray origin (rayorig) to the center of the current sphere.
Vec3<T> l = center - rayorig;
// tca is the length of l projected onto the normalized raydir;
// together with l it forms a right-angle triangle.
T tca = l.dot(raydir);
// if tca is < 0, the raydir is going in the opposite direction. No need to go further. Return false.
if (tca < 0) return false;
// if we keep on into the code, it's because the raydir may still hit the sphere.
// l.dot(l) gives us the squared length of the l vector. Then we use Pythagoras' theorem:
// subtract tca squared (tca * tca) and we get the squared distance from the center of the sphere (d2).
T d2 = l.dot(l) - (tca * tca);
// if this distance to the center (d2) is greater than the radius to the power of 2 (radius2), the raydir direction is missing the sphere.
// No need to go further. Return false.
if (d2 > radius2) return false;
// Pythagoras' theorem again: radius2 is the hypotenuse squared and d2 is one of the sides squared. Subtraction gives the third side squared.
// Using sqrt, we obtain the length thc. thc is how deep tca goes into the sphere.
T thc = sqrt(radius2 - d2);
if (t0 != NULL && t1 != NULL) {
// subtract thc from tca and you get the length from the ray origin to the surface hit point of the sphere.
*t0 = tca - thc;
// add thc to tca and you get the length from the ray origin to the surface hit point of the back side of the sphere.
*t1 = tca + thc;
}
// There is an intersection with a sphere; t0 and t1 hold the surface distance values. Return true.
return true;
}
};
std::vector<Sphere<double> *> spheres;
// function to mix 2 variables of type T.
template<typename T>
T mix(const T &a, const T &b, const T &mix)
{
return b * mix + a * (T(1) - mix);
}
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object, it
// returns the color of the object at the intersection point, otherwise it returns
// the background color.
template<typename T>
Vec3<T> trace(const Vec3<T> &rayorig, const Vec3<T> &raydir,
const std::vector<Sphere<T> *> &spheres, const int &depth)
{
T tnear = INFINITY;
const Sphere<T> *sphere = NULL;
// Try to find intersection of this raydir with the spheres in the scene
for (unsigned i = 0; i < spheres.size(); ++i) {
T t0 = INFINITY, t1 = INFINITY;
if (spheres[i]->intersect(rayorig, raydir, &t0, &t1)) {
// is the rayorig inside the sphere (t0 < 0)? If so, use the second hit (t0 = t1)
if (t0 < 0) t0 = t1;
// tnear is the last sphere intersection (or infinity). Is t0 in front of tnear?
if (t0 < tnear) {
// if so, update tnear to this closer t0 and update the closest sphere
tnear = t0;
sphere = spheres[i];
}
}
}
// At this moment in the program, we have the closest sphere (sphere) and the closest hit position (tnear)
// For this pixel, if there's no intersection with a sphere, return a Vec3 with the background color.
if (!sphere) return Vec3<T>(.5); // Grey background color.
// if we keep on with the code, it is because we had an intersection with at least one sphere.
Vec3<T> surfaceColor = 0; // initialisation of the color of the ray/surface of the object intersected by the ray.
Vec3<T> phit = rayorig + (raydir * tnear); // point of intersection.
Vec3<T> nhit = phit - sphere->center; // normal at the intersection point.
// if the normal and the view direction are not opposite to each other,
// reverse the normal direction.
if (raydir.dot(nhit) > 0) nhit = -nhit;
nhit.normalize(); // normalize normal direction
// The angle between raydir and the normal at point hit (not used).
//T s_angle = acos(raydir.dot(nhit)) / ( sqrt(raydir.dot(raydir)) * sqrt(nhit.dot(nhit)));
//T s_incidence = sin(s_angle);
T bias = 1e-5; // add some bias to the point from which we will be tracing
// Do we have transparency or reflection?
if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
T IdotN = raydir.dot(nhit); // raydir.normal
// I and N are not pointing in the same direction, so take the invert.
T facingratio = std::max(T(0), -IdotN);
// change the mix value between reflection and refraction to tweak the effect (fresnel effect)
T fresneleffect = mix<T>(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (not need to normalize because all vectors
// are already normalized)
Vec3<T> refldir = raydir - nhit * 2 * raydir.dot(nhit);
Vec3<T> reflection = trace(phit + (nhit * bias), refldir, spheres, depth + 1);
Vec3<T> refraction = 0;
// if the sphere is also transparent compute refraction ray (transmission)
if (sphere->transparency) {
T ior = 1.2, eta = 1 / ior;
T k = 1 - eta * eta * (1 - IdotN * IdotN);
Vec3<T> refrdir = raydir * eta - nhit * (eta * IdotN + sqrt(k));
refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
}
// the result is a mix of reflection and refraction (if the sphere is transparent)
surfaceColor = (reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
}
else {
// it's a diffuse object, no need to raytrace any further
// Look at all spheres to find lights
double shadow = 1.0;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i]->emissionColor.x > 0) {
// this is a light
Vec3<T> transmission = 1.0;
Vec3<T> lightDirection = spheres[i]->center - phit;
lightDirection.normalize();
T light_angle = (acos(raydir.dot(lightDirection)) / ( sqrt(raydir.dot(raydir)) * sqrt(lightDirection.dot(lightDirection))));
T light_incidence = sin(light_angle);
for (unsigned j = 0; j < spheres.size(); ++j) {
if (i != j) {
T t0, t1;
// Does the ray from point hit to the light intersect an object?
// If so, calculate the shadow.
if (spheres[j]->intersect(phit + (nhit * bias), lightDirection, &t0, &t1)) {
shadow = std::max(0.0, shadow - (1.0 - spheres[j]->transparency));
transmission = transmission * spheres[j]->surfaceColor * shadow;
//break;
}
}
}
// For each light found, we add light transmission to the pixel.
surfaceColor += sphere->surfaceColor * transmission *
std::max(T(0), nhit.dot(lightDirection)) * spheres[i]->emissionColor;
}
}
}
return surfaceColor + sphere->emissionColor;
}
// Main rendering function. We compute a camera ray for each pixel of the image,
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
//********************************** OPEN GL ***********************************************
void advanceDisplay(void)
{
cam_pos.z = cam_pos.z - 2;
glutPostRedisplay();
}
void backDisplay(void)
{
cam_pos.z = cam_pos.z + 2;
glutPostRedisplay();
}
void resetDisplay(void)
{
Vec3<double> new_cam_pos;
new_cam_pos = cam_pos;
cam_pos = new_cam_pos;
glutPostRedisplay();
}
void mouse(int button, int state, int x, int y)
{
switch (button)
{
case GLUT_LEFT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(advanceDisplay);
}
break;
case GLUT_MIDDLE_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(resetDisplay);
}
break;
case GLUT_RIGHT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(backDisplay);
}
break;
}
}
GLuint tex = 0;
void display(void)
{
int i;
float x, y;
render<double>(spheres); // Creates the image and put it to memory in image[].
std::vector< unsigned char > buf;
buf.reserve( width * height * 3 );
for( size_t y = 0; y < height; ++y )
{
for( size_t x = 0; x < width; ++x )
{
// flip vertically (height-1-y) because the OpenGL texture origin is in the lower-left corner
// flip horizontally (width-1-x) because...the original code did so
// (the -1 offsets keep the index inside the image[] buffer)
size_t i = (height-1-y) * width + (width-1-x);
buf.push_back( (unsigned char)( std::min(double(1), image[i].x) * 255.0 ) );
buf.push_back( (unsigned char)( std::min(double(1), image[i].y) * 255.0 ) );
buf.push_back( (unsigned char)( std::min(double(1), image[i].z) * 255.0 ) );
}
}
/* clear all pixels */
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, tex );
glTexSubImage2D
(
GL_TEXTURE_2D, 0,
0, 0,
width, height,
GL_RGB,
GL_UNSIGNED_BYTE,
&buf[0]
);
glBegin( GL_QUADS );
glTexCoord2i( 0, 0 );
glVertex2i( -1, -1 );
glTexCoord2i( 1, 0 );
glVertex2i( 1, -1 );
glTexCoord2i( 1, 1 );
glVertex2i( 1, 1 );
glTexCoord2i( 0, 1 );
glVertex2i( -1, 1 );
glEnd();
glutSwapBuffers();
}
int main(int argc, char **argv)
{
// position, radius, surface color, reflectivity, transparency, emission color
spheres.push_back(new Sphere<double>(Vec3<double>(0, -10004, -20), 10000, Vec3<double>(0.2), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(3, 0, -15), 2, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(1, -1, -18), 1, Vec3<double>(1.0, 1.0, 1.0), 0.9, 0.9));
spheres.push_back(new Sphere<double>(Vec3<double>(-2, 2, -15), 2, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 3, -18), 1, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 0, -25), 1, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(-1, 1, -25), 2, Vec3<double>(1.0, 1.0, 1.0), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(2, 2, -25), 1, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(5, 3, -25), 2, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
// light
spheres.push_back(new Sphere<double>(Vec3<double>(-10, 20, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(3)));
spheres.push_back(new Sphere<double>(Vec3<double>(0, 10, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(1)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(width, height);
glutInitWindowPosition(10,10);
glutCreateWindow(argv[0]);
glutDisplayFunc(display);
glutMouseFunc(mouse);
glGenTextures( 1, &tex );
glBindTexture( GL_TEXTURE_2D, tex );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
glTexImage2D( GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL );
glutMainLoop();
delete [] image;
while (!spheres.empty()) {
Sphere<double> *sph = spheres.back();
spheres.pop_back();
delete sph;
}
return 0;
}
How to load and display images is also explained on www.scratchapixel.com. It's strange you didn't see this lesson:
http://www.scratchapixel.com/lessons/3d-basic-lessons/lesson-5-colors-and-digital-images/source-code/
It's all in there, and they explain how to display images using GL textures.