Simple Oriented Bounding Box (OBB) collision detection explained - C++

I can implement the AABB method to detect collisions; it is easy and cheap, but I want to implement OBB for more accuracy. I create the bounding box at model initialization; it consists of 8 bounding vertices and a center. Each frame I transform all the vertices with the model's transformation matrix so the box follows the model's orientation. But I can't understand the method for detecting a collision between two OBBs, and I can't find a simplified, clear tutorial which explains the algorithm from a code point of view rather than a math one, because I am not a mathematician.
If I have:
struct Box {
    glm::vec3 vertices[8];
    Box() {
        for (int i = 0; i < 8; i++) {
            vertices[i] = glm::vec3(0);
        }
    }
    glm::vec3 max;
    glm::vec3 min;
    glm::vec3 origin;
    void reCompute() {
        max = vertices[0];
        min = vertices[0];
        for (int i = 1; i < 8; i++) {
            max.x = max.x > vertices[i].x ? max.x : vertices[i].x;
            max.y = max.y > vertices[i].y ? max.y : vertices[i].y;
            max.z = max.z > vertices[i].z ? max.z : vertices[i].z;
            min.x = min.x < vertices[i].x ? min.x : vertices[i].x;
            min.y = min.y < vertices[i].y ? min.y : vertices[i].y;
            min.z = min.z < vertices[i].z ? min.z : vertices[i].z;
        }
        origin = glm::vec3((max.x + min.x) / 2.0f, (max.y + min.y) / 2.0f, (max.z + min.z) / 2.0f);
    }
    // AABB intersection
    bool intersects(const Box& b) const {
        return (min.x < b.max.x) && (max.x > b.min.x) &&
               (min.y < b.max.y) && (max.y > b.min.y) &&
               (min.z < b.max.z) && (max.z > b.min.z) &&
               *this != b;
    }
    bool operator==(const Box& b) const {
        return (max.x == b.max.x && max.y == b.max.y && max.z == b.max.z &&
                min.x == b.min.x && min.y == b.min.y && min.z == b.min.z);
    }
    bool operator!=(const Box& b) const {
        return (max.x != b.max.x) || (max.y != b.max.y) || (max.z != b.max.z) ||
               (min.x != b.min.x) || (min.y != b.min.y) || (min.z != b.min.z);
    }
};
On model initialization I create the box:
box.vertices[0] = glm::vec3(meshMinX, meshMinY, meshMinZ);
box.vertices[1] = glm::vec3(meshMaxX, meshMinY, meshMinZ);
box.vertices[2] = glm::vec3(meshMinX, meshMaxY, meshMinZ);
box.vertices[3] = glm::vec3(meshMaxX, meshMaxY, meshMinZ);
box.vertices[4] = glm::vec3(meshMinX, meshMinY, meshMaxZ);
box.vertices[5] = glm::vec3(meshMaxX, meshMinY, meshMaxZ);
box.vertices[6] = glm::vec3(meshMinX, meshMaxY, meshMaxZ);
box.vertices[7] = glm::vec3(meshMaxX, meshMaxY, meshMaxZ);
and each frame I recompute the box with the model's transformation matrix:
for (int n = 0; n < 8; n++) {
    boxs[j].vertices[n] = glm::vec3(matrix * glm::vec4(box.vertices[n], 1));
}
boxs[j].reCompute();

A C++ implementation of the separating axis theorem (SAT) for simple collision detection between two 3D OBBs would be this:
#include <cmath>    // fabs
#include <iostream>

// define the operations to be used on our 3D vectors
struct vec3
{
    float x, y, z;
    vec3  operator- (const vec3& rhs) const { return { x - rhs.x, y - rhs.y, z - rhs.z }; }
    float operator* (const vec3& rhs) const { return x * rhs.x + y * rhs.y + z * rhs.z; } // DOT PRODUCT
    vec3  operator^ (const vec3& rhs) const { return { y * rhs.z - z * rhs.y, z * rhs.x - x * rhs.z, x * rhs.y - y * rhs.x }; } // CROSS PRODUCT
    vec3  operator* (const float& rhs) const { return { x * rhs, y * rhs, z * rhs }; }
};

// set the relevant elements of our oriented bounding box
struct OBB
{
    vec3 Pos, AxisX, AxisY, AxisZ, Half_size;
};

// check if there's a separating plane in between the selected axes
bool getSeparatingPlane(const vec3& RPos, const vec3& Plane, const OBB& box1, const OBB& box2)
{
    return (fabs(RPos * Plane) >
            (fabs((box1.AxisX * box1.Half_size.x) * Plane) +
             fabs((box1.AxisY * box1.Half_size.y) * Plane) +
             fabs((box1.AxisZ * box1.Half_size.z) * Plane) +
             fabs((box2.AxisX * box2.Half_size.x) * Plane) +
             fabs((box2.AxisY * box2.Half_size.y) * Plane) +
             fabs((box2.AxisZ * box2.Half_size.z) * Plane)));
}

// test for a separating plane on all 15 candidate axes
bool getCollision(const OBB& box1, const OBB& box2)
{
    vec3 RPos = box2.Pos - box1.Pos;

    return !(getSeparatingPlane(RPos, box1.AxisX, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisY, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisZ, box1, box2) ||
             getSeparatingPlane(RPos, box2.AxisX, box1, box2) ||
             getSeparatingPlane(RPos, box2.AxisY, box1, box2) ||
             getSeparatingPlane(RPos, box2.AxisZ, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisX ^ box2.AxisX, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisX ^ box2.AxisY, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisX ^ box2.AxisZ, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisY ^ box2.AxisX, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisY ^ box2.AxisY, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisY ^ box2.AxisZ, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisZ ^ box2.AxisX, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisZ ^ box2.AxisY, box1, box2) ||
             getSeparatingPlane(RPos, box1.AxisZ ^ box2.AxisZ, box1, box2));
}

// a quick test to see the code working
int main()
{
    // create two obbs
    OBB A, B;
    // set the first obb's properties
    A.Pos = { 0.f, 0.f, 0.f }; // set its center position
    // set the half size
    A.Half_size.x = 10.f;
    A.Half_size.y = 1.f;
    A.Half_size.z = 1.f;
    // set the axes orientation
    A.AxisX = { 1.f, 0.f, 0.f };
    A.AxisY = { 0.f, 1.f, 0.f };
    A.AxisZ = { 0.f, 0.f, 1.f };
    // set the second obb's properties
    B.Pos = { 20.f, 0.f, 0.f }; // set its center position
    // set the half size
    B.Half_size.x = 10.f;
    B.Half_size.y = 1.f;
    B.Half_size.z = 1.f;
    // set the axes orientation
    B.AxisX = { 1.f, 0.f, 0.f };
    B.AxisY = { 0.f, 1.f, 0.f };
    B.AxisZ = { 0.f, 0.f, 1.f };
    // run the test and print the result
    if (getCollision(A, B)) std::cout << "Collision!!!" << std::endl;
    else std::cout << "No collision." << std::endl;
    // pause and quit
    std::cout << std::endl;
    std::cin.get();
    return 0;
}
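To connect this with the question's setup: the OBB struct wants a center, three unit axes, and half-extents, and all of those can be read straight off the model matrix instead of recomputing min/max over the transformed vertices (which only yields the enclosing AABB again). A minimal sketch with glm, assuming localMin/localMax are the mesh-space bounds from the question, matrix is the model transform, and the transform contains no shear (scale is absorbed into the half-extents):
#include <glm/glm.hpp>

// convert a glm vector to the vec3 type used by the OBB code above
static vec3 toVec3(const glm::vec3& v) { return { v.x, v.y, v.z }; }

// Hypothetical helper: build the OBB representation from the question's
// mesh-space bounds and model matrix.
OBB makeOBB(const glm::vec3& localMin, const glm::vec3& localMax, const glm::mat4& matrix)
{
    glm::vec3 localCenter = 0.5f * (localMin + localMax);
    glm::vec3 localHalf   = 0.5f * (localMax - localMin);
    // the matrix columns are the transformed basis vectors; their lengths carry the scale
    glm::vec3 ax = glm::vec3(matrix[0]);
    glm::vec3 ay = glm::vec3(matrix[1]);
    glm::vec3 az = glm::vec3(matrix[2]);
    OBB obb;
    obb.Pos       = toVec3(glm::vec3(matrix * glm::vec4(localCenter, 1.0f)));
    obb.Half_size = toVec3(glm::vec3(localHalf.x * glm::length(ax),
                                     localHalf.y * glm::length(ay),
                                     localHalf.z * glm::length(az)));
    obb.AxisX = toVec3(glm::normalize(ax));
    obb.AxisY = toVec3(glm::normalize(ay));
    obb.AxisZ = toVec3(glm::normalize(az));
    return obb;
}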

To know whether two OBBs collide you use the SAT (separating axis theorem): you project all the points of both shapes onto every normal of both shapes. If the projections of the two shapes overlap on every normal, there is a collision; if there is at least one normal on which the projections do not overlap, the shapes do not collide. (The code below is 2D, where the face normals are enough; in 3D you must also test the cross products of the boxes' axes, as in the C++ code above, for a total of 15 axes.)
And that's all. To do that you will need a method that computes the orthogonal projection of a vector onto another vector and returns a scalar, and a method that tests whether two intervals overlap.
I have some code in Java.
Orthogonal projection of u on v:
/**
 * Vec u is projected on Vec v.
 * @param u 2d point
 * @param v 2d axis
 * @return the orthogonal projection (a scalar)
 */
public static float orthogonalProjectionOf(Vector2f u, Vector2f v){
    float norme_u = u.length();
    float norme_v = v.length();
    float dot_u_v = dot(u, v);
    float buffer = (dot_u_v / (norme_u * norme_v)) * norme_u;
    if (Float.isNaN(buffer)) return 0; // if u is the null vector, its projection is 0, not NaN
    else return buffer;
}
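Note that the expression above simplifies algebraically: (u.v / (|u| |v|)) * |u| is just u.v / |v|, and if the axis is already normalized it reduces to the plain dot product. In the question's C++/glm setting the same helper could be a one-liner; a sketch (unlike the Java version's NaN check, it does not guard against a zero-length v):
#include <glm/glm.hpp>

// scalar projection of u onto v: dot(u, v) / |v|;
// with a normalized axis v this is simply glm::dot(u, v)
float orthogonalProjectionOf(const glm::vec2& u, const glm::vec2& v) {
    return glm::dot(u, v) / glm::length(v);
}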
Overlap of two intervals:
/**
 * Get the overlap of two intervals on an axis.
 * @param minA
 * @param maxA
 * @param minB
 * @param maxB
 * @return true if the intervals overlap, false if there is no overlap
 */
public static boolean isOverlapping(float minA, float maxA, float minB, float maxB) {
    float minOverlap = Float.NaN;
    float maxOverlap = Float.NaN;
    // If B is contained in A
    if (minA <= minB && minB <= maxA) {
        if (Float.isNaN(minOverlap) || minB < minOverlap) minOverlap = minB;
    }
    if (minA <= maxB && maxB <= maxA) {
        if (Float.isNaN(maxOverlap) || maxB > maxOverlap) maxOverlap = maxB;
    }
    // If A is contained in B
    if (minB <= minA && minA <= maxB) {
        if (Float.isNaN(minOverlap) || minA < minOverlap) minOverlap = minA;
    }
    if (minB <= maxA && maxA <= maxB) {
        if (Float.isNaN(maxOverlap) || maxA > maxOverlap) maxOverlap = maxA;
    }
    if (Float.isNaN(minOverlap) || Float.isNaN(maxOverlap)) return false; // no intersection
    else return true; // intersection
}
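If you only need a yes/no answer rather than the overlap bounds themselves, the test collapses to comparing the interval endpoints. A minimal equivalent in C++ (the question's language):
// two intervals [minA, maxA] and [minB, maxB] overlap exactly when
// each one begins before the other one ends
bool isOverlapping(float minA, float maxA, float minB, float maxB) {
    return maxA >= minB && maxB >= minA;
}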
With that you are able to write a method that tests collision between two OBBs:
public boolean OBBwOBB(RigidBody bodyA, RigidBody bodyB) {
    Shape shapeA = bodyA.getObb().getShape();
    Shape shapeB = bodyB.getObb().getShape();
    short overlapCompt = 0;
    // We test the projection of the two shapes on each normal
    // Shape A:
    for (int i = 0; i < shapeA.getNbrOfNormals(); i++) {
        Vector2f normal = shapeA.getNormal(i, bodyA.getAngle());
        boolean overlap = overlapOnThisNormal(bodyA, bodyB, normal);
        if (overlap) {
            overlapCompt++;
        }
    }
    // Shape B:
    for (int i = 0; i < shapeB.getNbrOfNormals(); i++) {
        Vector2f normal = shapeB.getNormal(i, bodyB.getAngle());
        boolean overlap = overlapOnThisNormal(bodyA, bodyB, normal);
        if (overlap) {
            overlapCompt++;
        }
    }
    // Now we see if there is a collision
    short howManyNormals = (short) (shapeA.getNbrOfNormals() + shapeB.getNbrOfNormals());
    if (overlapCompt == howManyNormals) { // the projections overlap on every normal of both shapes
        return true;
    }
    else return false;
}
And you will need this to get the min and max of the projections of the two shapes on a vector:
/**
 * Test whether the orthogonal projections of two shapes on a vector overlap.
 * @param bodyA
 * @param bodyB
 * @param normal
 * @return true if the projections overlap, false otherwise
 */
public static boolean overlapOnThisNormal(RigidBody bodyA, RigidBody bodyB, Vector2f normal) {
    Shape shapeA = bodyA.getObb().getShape();
    Shape shapeB = bodyB.getObb().getShape();
    // We project each vertex of A
    float minA = Float.NaN;
    float maxA = Float.NaN;
    for (short j = 0; j < shapeA.getNbrOfPoint(); j++) {
        Vector2f vertex = shapeA.getVertex(j, bodyA.getScale().x, bodyA.getScale().y, bodyA.getPosition().x, bodyA.getPosition().y, bodyA.getAngle());
        float bufferA = Vector2f.orthogonalProjectionOf(vertex, normal);
        if (Float.isNaN(minA) || bufferA < minA) minA = bufferA; // set interval min
        if (Float.isNaN(maxA) || bufferA > maxA) maxA = bufferA; // set interval max
    }
    // We project each vertex of B
    float minB = Float.NaN;
    float maxB = Float.NaN;
    for (short j = 0; j < shapeB.getNbrOfPoint(); j++) {
        Vector2f vertex = shapeB.getVertex(j, bodyB.getScale().x, bodyB.getScale().y, bodyB.getPosition().x, bodyB.getPosition().y, bodyB.getAngle());
        float bufferB = Vector2f.orthogonalProjectionOf(vertex, normal);
        if (Float.isNaN(minB) || bufferB < minB) minB = bufferB; // set interval min
        if (Float.isNaN(maxB) || bufferB > maxB) maxB = bufferB; // set interval max
    }
    // Test whether the two intervals overlap
    boolean overlap = isOverlapping(minA, maxA, minB, maxB);
    return overlap;
}
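One design note on OBBwOBB above: it counts overlaps across all normals, but SAT also allows an early exit, because a single axis without overlap already proves there is no collision. A self-contained C++ sketch of that loop structure (Vec2, Interval and the vertex lists are stand-ins for the Shape accessors used above):
#include <vector>

struct Vec2 { float x, y; };
struct Interval { float min, max; };

// project a convex polygon's vertices onto an axis and keep the [min, max] range
Interval projectOnto(const std::vector<Vec2>& verts, const Vec2& axis) {
    Interval iv{ 1e30f, -1e30f };
    for (const Vec2& p : verts) {
        float d = p.x * axis.x + p.y * axis.y; // dot product
        if (d < iv.min) iv.min = d;
        if (d > iv.max) iv.max = d;
    }
    return iv;
}

// early-exit SAT: 'axes' holds every face normal of both shapes
// (plus, in 3D, the pairwise edge cross products)
bool satCollide(const std::vector<Vec2>& a, const std::vector<Vec2>& b,
                const std::vector<Vec2>& axes) {
    for (const Vec2& axis : axes) {
        Interval ia = projectOnto(a, axis);
        Interval ib = projectOnto(b, axis);
        if (ia.max < ib.min || ib.max < ia.min)
            return false; // separating axis found: no collision
    }
    return true; // overlap on every axis: collision
}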
I hope this helps you ;)

Related

Slight issue with edges in a self-made triangle rasterizer - C++

The triangles are mostly OK, but one or two triangles have a missing pixel between them, which probably means there's an issue with edge cases, but I can't figure it out. The white spaces only happen on the edges of the triangles, so I assume there is something wrong with either my edge equation or my top-left rule, but I'm really lost as to what is wrong with it.
struct EdgeEqn {
    float a, b, c;
    bool tl;
};
GLPbo::EdgeEqn construct(const glm::vec3 a, const glm::vec3& b) {
    GLPbo::EdgeEqn x = { a.y - b.y, b.x - a.x, (b.y - a.y) * a.x - (b.x - a.x) * a.y };
    if (x.a > 0) {
        x.tl = true;
    }
    else if (x.a < 0) {
        x.tl = false;
    }
    else if (x.b < 0) {
        x.tl = true;
    }
    else {
        x.tl = false;
    }
    return x;
}
bool edgeFunction(GLPbo::EdgeEqn x, const glm::vec2& c, float& line_eval)
{
    float eval = x.a * c.x + x.b * c.y + x.c;
    line_eval = eval;
    return (eval > 0.f || (eval == 0.f && x.tl))
        ? true : false;
}
bool Topleft_increase(GLPbo::EdgeEqn x, float eval) {
    return (eval > 0.f || (eval == 0.f && x.tl))
        ? true : false;
}
bool GLPbo::render_triangle(glm::vec3 const& p0, glm::vec3 const& p1,
                            glm::vec3 const& p2, glm::vec3 clr) {
    if (((p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y)) < 0) {
        culling++;
        return false;
    }
    GLPbo::Color random = { static_cast<GLubyte>(clr.r), static_cast<GLubyte>(clr.g),
                            static_cast<GLubyte>(clr.b), 255 };
    int min_x = static_cast<int>(std::min({ p0.x, p1.x, p2.x }));
    int min_y = static_cast<int>(std::min({ p0.y, p1.y, p2.y }));
    int max_x = static_cast<int>(std::max({ p0.x, p1.x, p2.x }));
    int max_y = static_cast<int>(std::max({ p0.y, p1.y, p2.y }));
    GLPbo::EdgeEqn l0 = construct(p1, p2);
    GLPbo::EdgeEqn l1 = construct(p2, p0);
    GLPbo::EdgeEqn l2 = construct(p0, p1);
    float ev0 = 0, ev1 = 0, ev2 = 0;
    bool e0 = edgeFunction(l0, glm::vec2(min_x + 0.5, min_y + 0.5), ev0);
    bool e1 = edgeFunction(l1, glm::vec2(min_x + 0.5, min_y + 0.5), ev1);
    bool e2 = edgeFunction(l2, glm::vec2(min_x + 0.5, min_y + 0.5), ev2);
    for (int y = min_y; y <= max_y; ++y) {
        bool hE0 = e0; bool hE1 = e1; bool hE2 = e2;
        float hEv0 = ev0; float hEv1 = ev1; float hEv2 = ev2;
        for (int x = min_x; x <= max_x; ++x) {
            if (hE0 == true && hE1 == true && hE2 == true) {
                set_pixel(x, y, random);
            }
            hEv0 += l0.a;
            hEv1 += l1.a;
            hEv2 += l2.a;
            hE0 = Topleft_increase(l0, hEv0);
            hE1 = Topleft_increase(l1, hEv1);
            hE2 = Topleft_increase(l2, hEv2);
        }
        ev0 += l0.b;
        ev1 += l1.b;
        ev2 += l2.b;
        e0 = Topleft_increase(l0, ev0);
        e1 = Topleft_increase(l1, ev1);
        e2 = Topleft_increase(l2, ev2);
    }
    return true;
}
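For reference, the top-left fill rule is usually stated geometrically: a pixel lying exactly on an edge belongs to the triangle only if that edge is a top edge (horizontal, with the interior below it) or a left edge (a non-horizontal edge on the interior's left side). A sketch of one common formulation as a predicate on the edge endpoints; the comparisons assume counter-clockwise winding with y growing downward, so it is not a drop-in fix for the code above if your conventions differ:
// one common formulation of the top-left rule, for counter-clockwise
// triangles in a raster where y grows downward; flip the comparisons
// for other winding orders or a y-up coordinate system
bool isTopLeft(const glm::vec2& a, const glm::vec2& b) {
    bool top  = (a.y == b.y) && (b.x < a.x); // horizontal edge going left
    bool left = (b.y > a.y);                 // edge going down
    return top || left;
}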

SFML axis-independent collision

I've implemented tilemap collision in my game. It works, but the problem comes when I'm colliding on one axis and trying to move on the other: I can't slide along the wall.
In Player.cpp:
void Player::update(float delta, std::vector<Tile>& tiles) {
    if (sf::Keyboard::isKeyPressed(sf::Keyboard::W) || sf::Keyboard::isKeyPressed(sf::Keyboard::Up) || sf::Joystick::getAxisPosition(0, sf::Joystick::Y) < -20) {
        newPos.y -= speed * delta;
    }
    if (sf::Keyboard::isKeyPressed(sf::Keyboard::A) || sf::Keyboard::isKeyPressed(sf::Keyboard::Left) || sf::Joystick::getAxisPosition(0, sf::Joystick::X) < -20) {
        newPos.x -= speed * delta;
    }
    if (sf::Keyboard::isKeyPressed(sf::Keyboard::S) || sf::Keyboard::isKeyPressed(sf::Keyboard::Down) || sf::Joystick::getAxisPosition(0, sf::Joystick::Y) > 20) {
        newPos.y += speed * delta;
    }
    if (sf::Keyboard::isKeyPressed(sf::Keyboard::D) || sf::Keyboard::isKeyPressed(sf::Keyboard::Right) || sf::Joystick::getAxisPosition(0, sf::Joystick::X) > 20) {
        newPos.x += speed * delta;
    }
    sf::Vector2f oldPos = sprite.getPosition();
    move(delta, newPos);
    for (int i = 0; i < tiles.size(); i++) {
        if (Collision::PixelPerfectTest(sprite, tiles[i].sprite) && tiles[i].collision) {
            sprite.setPosition(oldPos);
            newPos = oldPos;
        }
    }
}
void Player::move(float delta, sf::Vector2f position) {
    sprite.setPosition(position);
}
In Collision.cpp:
bool PixelPerfectTest(const sf::Sprite& Object1, const sf::Sprite& Object2, sf::Uint8 AlphaLimit) {
    sf::FloatRect Intersection;
    if (Object1.getGlobalBounds().intersects(Object2.getGlobalBounds(), Intersection)) {
        sf::IntRect O1SubRect = Object1.getTextureRect();
        sf::IntRect O2SubRect = Object2.getTextureRect();
        sf::Uint8* mask1 = Bitmasks.GetMask(Object1.getTexture());
        sf::Uint8* mask2 = Bitmasks.GetMask(Object2.getTexture());
        // Loop through our pixels
        for (int i = Intersection.left; i < Intersection.left + Intersection.width; i++) {
            for (int j = Intersection.top; j < Intersection.top + Intersection.height; j++) {
                sf::Vector2f o1v = Object1.getInverseTransform().transformPoint(i, j);
                sf::Vector2f o2v = Object2.getInverseTransform().transformPoint(i, j);
                // Make sure pixels fall within the sprite's subrect
                if (o1v.x > 0 && o1v.y > 0 && o2v.x > 0 && o2v.y > 0 &&
                    o1v.x < O1SubRect.width && o1v.y < O1SubRect.height &&
                    o2v.x < O2SubRect.width && o2v.y < O2SubRect.height) {
                    if (Bitmasks.GetPixel(mask1, Object1.getTexture(), (int)(o1v.x) + O1SubRect.left, (int)(o1v.y) + O1SubRect.top) > AlphaLimit &&
                        Bitmasks.GetPixel(mask2, Object2.getTexture(), (int)(o2v.x) + O2SubRect.left, (int)(o2v.y) + O2SubRect.top) > AlphaLimit)
                        return true;
                }
            }
        }
    }
    return false;
}
That's because your collision test is all-or-nothing. I would do extra collision tests to see whether the new x or y position is valid on its own, something like:
if (tiles[i].collision && Collision::PixelPerfectTest(sprite, tiles[i].sprite))
{
    sf::Vector2f checkPosX = newPos;
    sf::Vector2f checkPosY = newPos;
    checkPosX.y = oldPos.y;
    checkPosY.x = oldPos.x;
    sprite.setPosition(checkPosX);
    if (!Collision::PixelPerfectTest(sprite, tiles[i].sprite))
    {
        newPos = checkPosX;
    }
    else
    {
        sprite.setPosition(checkPosY);
        if (!Collision::PixelPerfectTest(sprite, tiles[i].sprite))
        {
            newPos = checkPosY;
        }
        else
        {
            sprite.setPosition(oldPos);
            newPos = oldPos;
        }
    }
}
As an aside, if you test tiles[i].collision first, as above, you will skip the more expensive PixelPerfectTest() call for non-collision tiles, because && short-circuits.

OpenGL view transformation matrix rotation

Recently I implemented a simple OpenGL program composed of a scene of objects. I've applied most of the transformation & projection matrices, so that I am able to rotate, translate & scale objects, move my camera along the z & x axes, and apply perspective projection. However, when it comes to camera rotation things get weird. My rotation matrix for the camera is simply a rotation matrix that rotates the world uniformly. When I rotate the world so that I look in the up direction (+y) and then move forward, the camera doesn't advance in the direction it is looking at, as is the case in FPS games; it moves relative to world space. I know that I am missing the vectors that specify the x, y, z direction axes, but I am unable to incorporate these vectors into my camera (view transformation) matrix. Most of the tutorials on the internet either describe it with a block diagram or use the conventional gluLookAt() function. I really need a brief explanation of view transformations, specifically camera rotation, and how I should implement it in my matrices. My final matrix is as follows:
resultTransform = perspectiveTrans * cameraTrans * modelTrans;
where:
perspectiveTrans applies only a perspective projection transformation,
cameraTrans is a combination of rotation and translation matrices that affects all objects in the scene,
modelTrans is the transformation that is applied to the models.
Matrix4X4.cpp file:
#include "Matrix4X4.h"
using namespace std;
////////////////////////////////// Constructor Declerations ////////////////////////////////
Matrix4X4::Matrix4X4()
{
setIdentity();
}
Matrix4X4::Matrix4X4(float value)
{
for(int i = 0 ; i < 4; i++)
for ( int j = 0; j < 4; j++)
Matrix[i][j] = value;
}
/////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Destructor Decleration //////////////////////////////
Matrix4X4::~Matrix4X4()
{
}
///////////////////////////////////////////////////////////////////////////////////
/////////////////////// Set Identity Matrix /////////////////////////////////////////
void Matrix4X4::setIdentity()
{
Matrix[0][0] =1; Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = 0;
Matrix[1][0] =0; Matrix[1][1] = 1; Matrix[1][2] = 0; Matrix[1][3] = 0;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = 1; Matrix[2][3] = 0;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 0; Matrix[3][3] = 1;
}
///////////////////////////////////////////////////////////////////////////////////
///////////////////////// Set Translation Matrix //////////////////////////////////
Matrix4X4 Matrix4X4::setTranslation(float x,float y,float z)
{
Matrix[0][0] =1; Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = x;
Matrix[1][0] =0; Matrix[1][1] = 1; Matrix[1][2] = 0; Matrix[1][3] = y;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = 1; Matrix[2][3] = z;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 0; Matrix[3][3] = 1;
return *this;
}
/////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// Set Rotation Matrix ///////////////////////////////////////////
Matrix4X4 Matrix4X4::setRotation(float x,float y,float z)
{
Matrix4X4 xRot;
Matrix4X4 yRot;
Matrix4X4 zRot;
x = (float)x * 3.14/ 180.0;
y = (float)y * 3.14/ 180.0;
z = (float)z * 3.14/ 180.0;
xRot.Matrix[0][0] =1; xRot.Matrix[0][1] = 0; xRot.Matrix[0][2] = 0; xRot.Matrix[0][3] = 0;
xRot.Matrix[1][0] =0; xRot.Matrix[1][1] = cosf(x); xRot.Matrix[1][2] = -sinf(x); xRot.Matrix[1][3] = 0;
xRot.Matrix[2][0] =0; xRot.Matrix[2][1] = sinf(x); xRot.Matrix[2][2] = cosf(x); xRot.Matrix[2][3] = 0;
xRot.Matrix[3][0] =0; xRot.Matrix[3][1] = 0; xRot.Matrix[3][2] = 0; xRot.Matrix[3][3] = 1;
yRot.Matrix[0][0] = cosf(y); yRot.Matrix[0][1] = 0; yRot.Matrix[0][2] = -sinf(y); yRot.Matrix[0][3] = 0;
yRot.Matrix[1][0] =0; yRot.Matrix[1][1] = 1; yRot.Matrix[1][2] = 0; yRot.Matrix[1][3] = 0;
yRot.Matrix[2][0] = sinf(y); yRot.Matrix[2][1] = 0; yRot.Matrix[2][2] = cosf(y); yRot.Matrix[2][3] = 0;
yRot.Matrix[3][0] =0; yRot.Matrix[3][1] = 0; yRot.Matrix[3][2] = 0; yRot.Matrix[3][3] = 1;
zRot.Matrix[0][0] = cosf(z); zRot.Matrix[0][1] = -sinf(z); zRot.Matrix[0][2] = 0; zRot.Matrix[0][3] = 0;
zRot.Matrix[1][0] = sinf(z); zRot.Matrix[1][1] = cosf(z); zRot.Matrix[1][2] = 0; zRot.Matrix[1][3] = 0;
zRot.Matrix[2][0] =0; zRot.Matrix[2][1] = 0; zRot.Matrix[2][2] = 1; zRot.Matrix[2][3] = 0;
zRot.Matrix[3][0] =0; zRot.Matrix[3][1] = 0; zRot.Matrix[3][2] = 0; zRot.Matrix[3][3] = 1;
return (zRot * yRot * xRot) ;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////// Set Scale Matrix //////////////////////////////////////////
Matrix4X4 Matrix4X4::setScale(float x,float y,float z)
{
Matrix[0][0] =x; Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = 0;
Matrix[1][0] =0; Matrix[1][1] = y; Matrix[1][2] = 0; Matrix[1][3] = 0;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = z; Matrix[2][3] = 0;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 0; Matrix[3][3] = 1;
return *this;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////// Set Perspective Projection ///////////////////////////////////////
void Matrix4X4::setPerspective(float fov,float aRatio,float zNear,float zFar)
{
fov = (fov/2) * 3.14 / 180.0;
float tanHalfFOV = tanf(fov);
float zRange = zNear - zFar;
Matrix[0][0] =1.0f / (tanHalfFOV * aRatio); Matrix[0][1] = 0; Matrix[0][2] = 0; Matrix[0][3] = 0;
Matrix[1][0] =0; Matrix[1][1] = 1.0f / tanHalfFOV; Matrix[1][2] = 0; Matrix[1][3] = 0;
Matrix[2][0] =0; Matrix[2][1] = 0; Matrix[2][2] = (-zNear - zFar)/ zRange; Matrix[2][3] = 2* zFar * zNear / zRange;
Matrix[3][0] =0; Matrix[3][1] = 0; Matrix[3][2] = 1; Matrix[3][3] = 0;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////// Getters & Setters ////////////////////////////////////////////
float * Matrix4X4::getMat()
{
return (float *) Matrix;
}
float Matrix4X4::getMember(int x, int y) const
{
return Matrix[x][y];
}
void Matrix4X4::setMat(int row,int col,float value)
{
Matrix[row][col] = value;
}
/////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////// (*) Operator Overload //////////////////////////////////////
Matrix4X4 operator * (const Matrix4X4 & lhs,const Matrix4X4 & rhs)
{
Matrix4X4 result;
for(int i = 0 ; i < 4; i++)
for ( int j = 0; j < 4; j++)
result.setMat(i, j, lhs.getMember(i,0) * rhs.getMember(0, j) +
lhs.getMember(i,1) * rhs.getMember(1, j) +
lhs.getMember(i,2) * rhs.getMember(2, j) +
lhs.getMember(i,3) * rhs.getMember(3, j));
return result;
}
//////////////////////////////////////////////////////////////////////////////////////////////////
The transformation code I use in my main block:
SDL_PumpEvents();
for (int x = 0; x < 256; x++)
{
    if (state[x] == 1)
    {
        if (x == 26)      // SDL_SCANCODE_W
            tranForward -= 0.001;
        if (x == 22)      // SDL_SCANCODE_S
            tranForward += 0.001;
        if (x == 4)       // SDL_SCANCODE_A
            tranRight += 0.0009;
        if (x == 7)       // SDL_SCANCODE_D
            tranRight -= 0.0009;
        if (x == 82)      // SDL_SCANCODE_UP
            lookUp += 0.02;
        if (x == 81)      // SDL_SCANCODE_DOWN
            lookUp -= 0.02;
        if (x == 80)      // SDL_SCANCODE_LEFT
            lookRight -= 0.02;
        if (x == 79)      // SDL_SCANCODE_RIGHT
            lookRight += 0.02;
    }
}
modelTrans = Translation.setTranslation(0, 0, 5) * Scale.setScale(0.5, 0.5, 0.5);
camTrans = Rotation.setRotation(lookUp, lookRight, 0) * Translation.setTranslation(tranRight, 0, tranForward);
Projection.setPerspective(70, win.getWidth()/win.getHeight(), 0.1, 1000);
result = Projection * camTrans * modelTrans;
glUniformMatrix4fv(uniformloc, 1, GL_TRUE, result.getMat());
Matrix multiplication does not follow the same rules as scalar multiplication, and in your case A*B does NOT equal B*A. If the rest of the code is good, your solution might simply be turning
result = Projection * camTrans * modelTrans;
into
result = Projection * (modelTrans * camTrans);
Always watch out for the multiplication order when dealing with anything but scalar values. (Matrix multiplication is associative, so the parentheses above only make the intent explicit; the actual change is that camTrans and modelTrans swapped places.)
In general, when you are combining a translation and a rotation matrix you need to think in the matrix's own coordinate space; that means, like playing an FPS:
Multiplying rotation*translation means the object rotates first and then translates, so the object's position depends on the rotation already applied, and a 180 degree rotation will translate the object backwards from a third-person perspective.
Multiplying translation*rotation means the object translates first and then rotates, so it moves in the same direction no matter the rotation; only the direction the object is facing is changed by the rotation matrix.
A nice example: if you want to represent the movement of the Earth around the Sun (the Earth circles the Sun while rotating around its own axis at some radius):
Matrix4X4 orbitRotation; //rotation matrix for where in orbit the object is
Matrix4X4 objectRotation; //object rotation around its own axis
Matrix4X4 orbitRadius; //object orbit radius
Matrix4X4 result = (orbitRotation*orbitRadius)*objectRotation;
My code seemed to ignore the previous matrix calculation and recalculated the transformations with respect to my scene's initial state. The desired world rotation & translation is achieved by applying a fixed delta for rotation & translation each frame; the modified code blocks are as follows:
for (int x = 0; x < 256; x++)
{
    if (state[x] == 1)
    {
        if (x == 26)      // SDL_SCANCODE_W
            tranForward = -0.001;
        if (x == 22)      // SDL_SCANCODE_S
            tranForward = 0.001;
        if (x == 4)       // SDL_SCANCODE_A
            tranRight = 0.0009;
        if (x == 7)       // SDL_SCANCODE_D
            tranRight = -0.0009;
        if (x == 82)      // SDL_SCANCODE_UP
            lookUp = 0.02;
        if (x == 81)      // SDL_SCANCODE_DOWN
            lookUp = -0.02;
        if (x == 80)      // SDL_SCANCODE_LEFT
            lookRight = -0.02;
        if (x == 79)      // SDL_SCANCODE_RIGHT
            lookRight = 0.02;
    }
}
camTrans = Rotation.setRotation(lookUp, lookRight, 0) * Translation.setTranslation(tranRight, 0, tranForward);
result = camTrans * result;
modelTrans = Projection * result;
tranForward = 0.0;
tranRight = 0.0;
lookUp = 0.0;
lookRight = 0.0;
glUniformMatrix4fv(uniformloc, 1, GL_TRUE, modelTrans.getMat());
Note that the result matrix keeps track of the previous state, and the current frame's transformations are applied with respect to it.
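One caveat with accumulating result = camTrans * result every frame: floating-point error builds up in the rotation part over many frames, so the basis vectors slowly stop being orthonormal and the view starts to skew. A common countermeasure is to re-orthonormalize the upper-left 3x3 block from time to time; a hypothetical helper sketched against the Matrix4X4 interface above (getMember/setMat are the accessors shown; the Gram-Schmidt step itself is standard and assumes the matrix is not degenerate):
#include <cmath>

// re-orthonormalize the rotation rows of an accumulated transform
void orthonormalizeRotation(Matrix4X4& m)
{
    float r[3][3];
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            r[i][j] = m.getMember(i, j);
    // normalize row 0
    float len = sqrtf(r[0][0]*r[0][0] + r[0][1]*r[0][1] + r[0][2]*r[0][2]);
    for (int j = 0; j < 3; j++) r[0][j] /= len;
    // make row 1 orthogonal to row 0, then normalize it
    float d = r[1][0]*r[0][0] + r[1][1]*r[0][1] + r[1][2]*r[0][2];
    for (int j = 0; j < 3; j++) r[1][j] -= d * r[0][j];
    len = sqrtf(r[1][0]*r[1][0] + r[1][1]*r[1][1] + r[1][2]*r[1][2]);
    for (int j = 0; j < 3; j++) r[1][j] /= len;
    // row 2 = row 0 x row 1, orthonormal by construction
    r[2][0] = r[0][1]*r[1][2] - r[0][2]*r[1][1];
    r[2][1] = r[0][2]*r[1][0] - r[0][0]*r[1][2];
    r[2][2] = r[0][0]*r[1][1] - r[0][1]*r[1][0];
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            m.setMat(i, j, r[i][j]);
}
Calling something like this every few hundred frames on result is typically enough.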

Bounding Volume Hierarchy ray traversal issues

I've successfully implemented BVH as described in PBRT. It has one significant issue though - the traversal visits ALL nodes that intersect the ray, which is wrong (in terms of performance).
So I ended up optimizing the ray traversal; currently I use the version from Aila & Laine's implementation in their "Understanding the Efficiency of Ray Traversal on GPUs". First, here is the code:
INLINE bool BVH::Traverse(TriangleWoop* prims, Ray* ray, IntersectResult* result)
{
    unsigned int todo[32];
    unsigned int todoOffset = 0;
    unsigned int nodeNum = 0;
    bool hit = false;
    IntersectResult tmp = IntersectResult();
    *(int*)&tmp.data.w = -1;
    float tmin = 2e30f;
    float4 origin = ray->origin;
    float4 direction = ray->direction;
    float4 invdir = rcp(direction);
    float tmpx = 0.0f, tmpy = 0.0f;
    while (true)
    {
        while (this->nodes[nodeNum].prim_count == 0)
        {
            tmpx += 0.01f;
            tmpy += 0.001f;
            float4 c0v1 = (this->nodes[nodeNum + 1].bounds.minPt - origin) * invdir;
            float4 c0v2 = (this->nodes[nodeNum + 1].bounds.maxPt - origin) * invdir;
            float4 c1v1 = (this->nodes[this->nodes[nodeNum].above_child].bounds.minPt - origin) * invdir;
            float4 c1v2 = (this->nodes[this->nodes[nodeNum].above_child].bounds.maxPt - origin) * invdir;
            float4 c0n = f4min(c0v1, c0v2);
            float4 c0f = f4max(c0v1, c0v2);
            float4 c1n = f4min(c1v1, c1v2);
            float4 c1f = f4max(c1v1, c1v2);
            float n0 = max(c0n.x, max(c0n.y, c0n.z));
            float f0 = min(c0f.x, min(c0f.y, c0f.z));
            float n1 = max(c1n.x, max(c1n.y, c1n.z));
            float f1 = min(c1f.x, min(c1f.y, c1f.z));
            bool child0 = (f0 > 0.0f) && (n0 < f0);
            bool child1 = (f1 > 0.0f) && (n1 < f1);
            child0 &= (n0 < tmin);
            child1 &= (n1 < tmin);
            unsigned int nodeAddr = this->nodes[nodeNum].above_child;
            nodeNum = nodeNum + 1;
            if (child0 != child1)
            {
                if (child1)
                {
                    nodeNum = nodeAddr;
                }
            }
            else
            {
                if (!child0)
                {
                    if (todoOffset == 0)
                    {
                        goto result;
                    }
                    nodeNum = todo[--todoOffset];
                }
                else
                {
                    if (n1 < n0)
                    {
                        swap(nodeNum, nodeAddr);
                    }
                    todo[todoOffset++] = nodeAddr;
                }
            }
        }
        if (this->nodes[nodeNum].prim_count > 0)
        {
            for (unsigned int i = this->nodes[nodeNum].prim_offset; i < this->nodes[nodeNum].prim_offset + this->nodes[nodeNum].prim_count; i++)
            {
                const TriangleWoop* tri = &prims[this->indexes[i]];
                if (IntersectRayTriangleWoop(ray, tri, &tmp))
                {
                    if (tmp.data.z > 0.0f && tmp.data.z < result->data.z)
                    {
                        tmin = tmp.data.z;
                        result->data.z = tmp.data.z;
                        result->data.x = tmp.data.x;
                        result->data.y = tmp.data.y;
                        *(int*)&result->data.w = this->indexes[i];
                        hit = true;
                    }
                }
            }
        }
        if (todoOffset == 0)
        {
            goto result;
        }
        nodeNum = todo[--todoOffset];
    }
result:
    result->data.x = tmpx;
    result->data.y = tmpy;
    return hit;
}
Technically it's just a standard while-while stack-based ray-BVH traversal. Now to the main problem. Look at the next image (viewing Sponza from the outside); the color shows how many nodes in the BVH have been visited (full red = 100, full yellow = 1100):
The next image shows a similar situation inside:
As you can see this is kind of a problem - it traverses many more nodes than it's supposed to. Can someone see something wrong with my code? Any advice is welcome, as I've been stuck with this for a few days already and can't think of a solution.

Ray tracing triangular mesh objects

I'm trying to write a ray tracer for objects formed of triangular meshes. I'm using an external library to load a cube in .ply format and then trace rays against it. So far I've implemented most of the tracer, and now I'm trying to test it with a single cube, but for some reason all I get on the screen is a red line. I've tried several ways to fix it but I simply can't figure it out anymore. For this primary test I'm only creating primary rays, and if they hit my cube I color that pixel with the cube's diffuse color and return. For checking ray-object intersections, I go through all the triangles that form the object and return the distance to the closest one. It would be great if you could have a look at the code and tell me what could have gone wrong and where. I would greatly appreciate it.
Ray-Triangle intersection:
// Möller-Trumbore ray-triangle intersection
bool intersectTri(const Vec3D& ray_origin, const Vec3D& ray_direction, const Vec3D& v0, const Vec3D& v1, const Vec3D& v2, double& t, double& u, double& v) const
{
    Vec3D edge1 = v1 - v0;
    Vec3D edge2 = v2 - v0;
    Vec3D pvec = ray_direction.cross(edge2);
    double det = edge1.dot(pvec);
    if (det > -THRESHOLD && det < THRESHOLD)
        return false; // ray is parallel to the triangle plane
    double invDet = 1/det;
    Vec3D tvec = ray_origin - v0;
    u = tvec.dot(pvec)*invDet;
    if (u < 0 || u > 1)
        return false;
    Vec3D qvec = tvec.cross(edge1);
    v = ray_direction.dot(qvec)*invDet;
    if (v < 0 || u + v > 1)
        return false;
    t = edge2.dot(qvec)*invDet;
    if (t < 0)
        return false;
    return true;
}
// Object intersection
bool intersect(const Vec3D& ray_origin, const Vec3D& ray_direction, IntersectionData& idata, bool enforce_max) const
{
    double tClosest;
    if (enforce_max)
    {
        tClosest = idata.t;
    }
    else
    {
        tClosest = TMAX;
    }
    for (int i = 0; i < indices.size(); i++)
    {
        const Vec3D v0 = vertices[indices[i][0]];
        const Vec3D v1 = vertices[indices[i][1]];
        const Vec3D v2 = vertices[indices[i][2]];
        double t, u, v;
        if (intersectTri(ray_origin, ray_direction, v0, v1, v2, t, u, v))
        {
            if (t < tClosest)
            {
                idata.t = t;
                tClosest = t;
                idata.u = u;
                idata.v = v;
                idata.index = i;
            }
        }
    }
    return (tClosest < TMAX && tClosest > 0) ? true : false;
}
Vec3D trace(World world, Vec3D& ray_origin, Vec3D& ray_direction)
{
    Vec3D objColor = world.background_color;
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;
    double tClosest = TMAX;
    Object* hitObject = NULL;
    for (unsigned int i = 0; i < world.objs.size(); i++)
    {
        IntersectionData idata_curr;
        if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
        {
            if (idata_curr.t < tClosest && idata_curr.t > 0)
            {
                idata.t = idata_curr.t;
                idata.u = idata_curr.u;
                idata.v = idata_curr.v;
                idata.index = idata_curr.index;
                tClosest = idata_curr.t;
                hitObject = &(world.objs[i]);
            }
        }
    }
    if (hitObject == NULL)
    {
        return world.background_color;
    }
    else
    {
        return hitObject->getDiffuse();
    }
}
int main(int argc, char** argv)
{
    parse("cube.ply");
    Vec3D diffusion1(1, 0, 0);
    Vec3D specular1(1, 1, 1);
    Object cube1(coordinates, connected_vertices, diffusion1, specular1, 0, 0);
    World wrld;
    // Add objects to the world
    wrld.objs.push_back(cube1);
    Vec3D background(0, 0, 0);
    wrld.background_color = background;
    // Set light color
    Vec3D light_clr(1, 1, 1);
    wrld.light_colors.push_back(light_clr);
    // Set light position
    Vec3D light(0, 64, -10);
    wrld.light_positions.push_back(light);
    int width = 128;
    int height = 128;
    Vec3D* image = new Vec3D[width*height];
    Vec3D* pixel = image;
    // Trace rays
    for (int y = -height/2; y < height/2; ++y)
    {
        for (int x = -width/2; x < width/2; ++x, ++pixel)
        {
            Vec3D ray_dir(x+0.5, y+0.5, -1.0);
            ray_dir.normalize();
            Vec3D ray_orig(0.5*width, 0.5*height, 0.0);
            *pixel = trace(wrld, ray_orig, ray_dir);
        }
    }
    savePPM("./test.ppm", image, width, height);
    return 0;
}
I've just run a test case, with a unit cube centered at (0,0,-1.5) and scaled on the X and Y axes by 100, and the result looks wrong. It seems that there is something wrong with the projection, but I can't really tell exactly what from the result. Also, since the cube is centered at (0,0), shouldn't the final object appear in the middle of the picture?
FIX: I fixed the centering problem by doing ray_dir = ray_dir - ray_orig before normalizing and calling the trace function. Still, the perspective seems plain wrong.
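For comparison, a conventional pinhole camera builds each primary ray from the camera position through a point on an image plane derived from the field of view, rather than from raster coordinates directly. A minimal sketch, assuming a camera at the origin looking down -z, pixel indices x, y in [0, width) x [0, height), and the Vec3D three-argument constructor and normalize() used above:
#include <cmath>

// hypothetical primary-ray generation for a pinhole camera at the origin;
// fov is the vertical field of view in degrees
void generatePrimaryRay(int x, int y, int width, int height, double fov,
                        Vec3D& ray_orig, Vec3D& ray_dir)
{
    const double PI = 3.14159265358979323846;
    double aspect = double(width) / double(height);
    double scale = tan(0.5 * fov * PI / 180.0); // half-height of the image plane at z = -1
    // map pixel centers to [-1, 1], then onto the image plane
    double px = (2.0 * (x + 0.5) / width - 1.0) * aspect * scale;
    double py = (1.0 - 2.0 * (y + 0.5) / height) * scale;
    ray_orig = Vec3D(0, 0, 0);
    ray_dir = Vec3D(px, py, -1.0);
    ray_dir.normalize();
}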
I continued working and have now started implementing diffuse reflection according to Phong.
Vec3D trace(World world, Vec3D& ray_origin, Vec3D& ray_direction)
{
    Vec3D objColor = Vec3D(0);
    IntersectionData idata;
    double coeff = 1.0;
    int depth = 0;
    do
    {
        double tClosest = TMAX;
        Object* hitObject = NULL;
        for (unsigned int i = 0; i < world.objs.size(); i++)
        {
            IntersectionData idata_curr;
            if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false))
            {
                if (idata_curr.t < tClosest && idata_curr.t > 0)
                {
                    idata.t = idata_curr.t;
                    idata.u = idata_curr.u;
                    idata.v = idata_curr.v;
                    idata.index = idata_curr.index;
                    tClosest = idata_curr.t;
                    hitObject = &(world.objs[i]);
                }
            }
        }
        if (hitObject == NULL)
        {
            return world.background_color;
        }
        Vec3D newStart = ray_origin + ray_direction*idata.t;
        // Compute normal at intersection by interpolating vertex normals (Phong idea)
        Vec3D v0 = hitObject->getVertices()[hitObject->getIndices()[idata.index][0]];
        Vec3D v1 = hitObject->getVertices()[hitObject->getIndices()[idata.index][1]];
        Vec3D v2 = hitObject->getVertices()[hitObject->getIndices()[idata.index][2]];
        Vec3D n1 = hitObject->getNormals()[hitObject->getIndices()[idata.index][0]];
        Vec3D n2 = hitObject->getNormals()[hitObject->getIndices()[idata.index][1]];
        Vec3D n3 = hitObject->getNormals()[hitObject->getIndices()[idata.index][2]];
        // Vec3D N = n1 + (n2 - n1)*idata.u + (n3 - n1)*idata.v;
        Vec3D N = v0.computeFaceNrm(v1, v2);
        if (ray_direction.dot(N) > 0)
        {
            N = N*(-1);
        }
        N.normalize();
        Vec3D lightray_origin = newStart;
        for (unsigned int itr = 0; itr < world.light_positions.size(); itr++)
        {
            Vec3D lightray_dir = world.light_positions[0] - newStart;
            lightray_dir.normalize();
            double cos_theta = max(N.dot(lightray_dir), 0.0);
            objColor.setX(objColor.getX() + hitObject->getDiffuse().getX()*hitObject->getDiffuseReflection()*cos_theta);
            objColor.setY(objColor.getY() + hitObject->getDiffuse().getY()*hitObject->getDiffuseReflection()*cos_theta);
            objColor.setZ(objColor.getZ() + hitObject->getDiffuse().getZ()*hitObject->getDiffuseReflection()*cos_theta);
            return objColor;
        }
        depth++;
    } while (coeff > 0 && depth < MAX_RAY_DEPTH);
    return objColor;
}
When I reach an object with the primary ray, I send another ray to the light source positioned at (0,0,0) and return the color according to the Phong illumination model for diffuse reflection, but the result is really not the expected one: http://s15.postimage.org/vc6uyyssr/test.png. The cube is a unit cube centered at (0,0,0) and then translated by (1.5, -1.5, -1.5). From my point of view, the left side of the cube should get more light and it actually does. What do you think of it?