Get world coordinates from D3DXIntersectTri - C++

I have a square area and need to determine where on it the mouse is pointing.
With D3DXIntersectTri I can tell IF the mouse is pointing at it, but I have trouble calculating the x, y, z coordinates.
The drawing is done from a vertex buffer, initialized with this vertices array:
vertices[0].position = D3DXVECTOR3(-10, 0, -10);
vertices[1].position = D3DXVECTOR3(-10, 0, 10);
vertices[2].position = D3DXVECTOR3( 10, 0, -10);
vertices[3].position = D3DXVECTOR3( 10, 0, -10);
vertices[4].position = D3DXVECTOR3(-10, 0, 10);
vertices[5].position = D3DXVECTOR3( 10, 0, 10);
I have this method so far, but it is not giving me the right coordinates (it works only on a small part of the area, near two of the edges, and is more or less accurate inside):
BOOL Area::getcoord( Ray& ray, D3DXVECTOR3& coord)
{
D3DXVECTOR3 rayOrigin, rayDirection;
rayDirection = ray.direction;
rayOrigin = ray.origin;
float d;
D3DXMATRIX matInverse;
D3DXMatrixInverse(&matInverse, NULL, &matWorld);
// Transform ray origin and direction by inv matrix
D3DXVECTOR3 rayObjOrigin,rayObjDirection;
D3DXVec3TransformCoord(&rayOrigin, &rayOrigin, &matInverse);
D3DXVec3TransformNormal(&rayDirection, &rayDirection, &matInverse);
D3DXVec3Normalize(&rayDirection,&rayDirection);
float u, v;
BOOL isHit1, isHit2;
D3DXVECTOR3 p1, p2, p3;
p1 = vertices[3].position;
p2 = vertices[4].position;
p3 = vertices[5].position;
isHit1 = D3DXIntersectTri(&p1, &p2, &p3, &rayOrigin, &rayDirection, &u, &v, &d);
isHit2 = FALSE;
if(!isHit1)
{
p1 = vertices[0].position;
p2 = vertices[1].position;
p3 = vertices[2].position;
isHit2 = D3DXIntersectTri(&p1, &p2, &p3, &rayOrigin, &rayDirection, &u, &v, &d);
}
if(isHit1)
{
coord.x = 1 * ((1-u-v)*p3.x + u*p3.y + v*p3.z);
coord.y = 0.2f;
coord.z = -1 * ((1-u-v)*p1.x + u*p1.y + v*p1.z);
D3DXVec3TransformCoord(&coord, &coord, &matInverse);
}
if(isHit2)
{
coord.x = -1 * ((1-u-v)*p3.x + u*p3.y + v*p3.z);
coord.y = 0.2f;
coord.z = 1 * ((1-u-v)*p1.x + u*p1.y + v*p1.z);
D3DXVec3TransformCoord(&coord, &coord, &matWorld);
}
return isHit1 || isHit2;
}

Barycentric coordinates don't work the way you used them: u and v are weights applied to whole vertices, not to the components of a single vertex. With the convention D3DXIntersectTri uses, the hit point is
coord = p1 + u * (p2 - p1) + v * (p3 - p1)
which is the same as coord = (1 - u - v) * p1 + u * p2 + v * p3.
Alternatively you can use the d ray parameter:
coord = rayOrigin + d * rayDirection
Both ways should result in the same coordinate, in object space; transform it back with matWorld (not matInverse, as your isHit1 branch does).
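Putting both fixes together, a minimal corrected sketch of getcoord could look like this (assuming the same Ray struct, vertices array and matWorld member as above; illustrative, not tested against the original project):
BOOL Area::getcoord(Ray& ray, D3DXVECTOR3& coord)
{
    D3DXVECTOR3 rayOrigin = ray.origin;
    D3DXVECTOR3 rayDirection = ray.direction;
    // Bring the ray into object space.
    D3DXMATRIX matInverse;
    D3DXMatrixInverse(&matInverse, NULL, &matWorld);
    D3DXVec3TransformCoord(&rayOrigin, &rayOrigin, &matInverse);
    D3DXVec3TransformNormal(&rayDirection, &rayDirection, &matInverse);
    D3DXVec3Normalize(&rayDirection, &rayDirection);
    float u, v, d;
    for (int tri = 0; tri < 2; ++tri) // the quad is two triangles
    {
        D3DXVECTOR3 p1 = vertices[tri * 3 + 0].position;
        D3DXVECTOR3 p2 = vertices[tri * 3 + 1].position;
        D3DXVECTOR3 p3 = vertices[tri * 3 + 2].position;
        if (D3DXIntersectTri(&p1, &p2, &p3, &rayOrigin, &rayDirection, &u, &v, &d))
        {
            // Either form gives the object-space hit point:
            coord = rayOrigin + d * rayDirection;
            // coord = p1 + u * (p2 - p1) + v * (p3 - p1);
            // Back to world space with the forward matrix, not the inverse.
            D3DXVec3TransformCoord(&coord, &coord, &matWorld);
            return TRUE;
        }
    }
    return FALSE;
}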

Related

Raytracing program fails to detect intersections in C++

I am working on a simple raytracer in C++ and am currently implementing the intersection function, but I have encountered some issues.
For some reason, the collision detection only works for a tiny rectangle in my image. In the image below you can see that it draws the room fine for a small part of the screen but fails to do so for the rest of the scene; only a small section gets drawn correctly.
Why does my intersection detection not work? I have included the code for the intersection and draw functions below.
LoadTestModel(m_Model);
m_Light.position = glm::vec3(0.0f, -1.0f, 0.0);
m_Light.color = glm::vec3(0.f, 0.f, 0.f);
m_Light.ambient = glm::vec3(0.5f, 0.5f, 0.5f);
m_Camera.position = glm::vec3(0.0, 0.0, -2.0);
m_Camera.yaw = 0.0f;
}
void Lab2Scene::Draw(Window& window)
{
if (!m_RenderNext) return;
m_RenderNext = false;
for (uint32_t y = 0; y < window.GetHeight(); ++y)
{
for (uint32_t x = 0; x < window.GetWidth(); ++x)
{
Ray ray = {};
glm::vec3 d(x - (window.GetWidth() / 2), y - (window.GetHeight() / 2), (window.GetHeight() / 2));
d = glm::normalize(d);
ray.direction = d * m_Camera.GetRotationY();
ray.start = m_Camera.position;
// Find the closest intersection of the casted ray.
Intersection nearest_intersection = {};
if (ClosestIntersection(ray, m_Model, nearest_intersection))
{
//window.PutPixel(x, y, glm::vec3(1.f, 0.f, 0.f));
window.PutPixel(x, y, DirectLight(m_Light, nearest_intersection, m_Model) + m_Model[nearest_intersection.triangleIndex].color * m_Light.ambient); // DirectLight(m_Light, intersection, m_Model)
}
else
{
window.PutPixel(x, y, m_Light.color);
}
}
}
}
bool Lab2Scene::ClosestIntersection(const Ray& ray, const std::vector<Triangle>& triangles, Intersection& intersection)
{
float m = std::numeric_limits<float>::max();
intersection.distance = m;
bool inters = false;
for (int i = 0; i < triangles.size(); ++i) {
float dot = glm::dot(ray.direction, triangles[i].normal);
if (dot != 0) {
using glm::vec3;
using glm::mat3;
vec3 v0 = triangles[i].v0;
vec3 v1 = triangles[i].v1;
vec3 v2 = triangles[i].v2;
vec3 e1 = v1 - v0;
vec3 e2 = v2 - v0;
vec3 b = ray.start - v0;
mat3 A(-ray.direction, e1, e2);
vec3 x = glm::inverse(A) * b;
if (x[1] >= 0 && x[2] >= 0 && x[1] + x[2] <= 1 && x[0] >= 0) {
vec3 intersect = ray.start + (x[0] * ray.direction);
if (glm::distance(ray.start, intersect) <= intersection.distance) {
intersection.position = intersect;
intersection.distance = glm::distance(ray.start, intersect);
intersection.triangleIndex = i;
inters = true;
}
}
}
}
return inters;
}
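For reference, the same per-triangle test can be written without inverting a matrix for every triangle, in the Möller–Trumbore style that also appears in the last question on this page. A self-contained sketch using the same glm types (the Ray fields are assumed to match the question's struct):
#include <glm/glm.hpp>
#include <cmath>

struct Ray { glm::vec3 start; glm::vec3 direction; };

// Returns true and the ray parameter t if the ray hits triangle (v0, v1, v2).
bool IntersectTriangleMT(const Ray& ray, const glm::vec3& v0,
                         const glm::vec3& v1, const glm::vec3& v2, float& t)
{
    const float EPS = 1e-6f;
    glm::vec3 e1 = v1 - v0;
    glm::vec3 e2 = v2 - v0;
    glm::vec3 p = glm::cross(ray.direction, e2);
    float det = glm::dot(e1, p);
    if (std::fabs(det) < EPS) return false; // ray parallel to the triangle
    float invDet = 1.0f / det;
    glm::vec3 s = ray.start - v0;
    float u = glm::dot(s, p) * invDet;
    if (u < 0.0f || u > 1.0f) return false;
    glm::vec3 q = glm::cross(s, e1);
    float v = glm::dot(ray.direction, q) * invDet;
    if (v < 0.0f || u + v > 1.0f) return false;
    t = glm::dot(e2, q) * invDet;
    return t >= 0.0f; // hit must be in front of the ray origin
}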

OpenGL: Problem with Arcball Camera Rotation

I need to implement an arcball camera. I have something similar, but it works very crookedly: the angle changes sharply, and when turning right or left the camera pitches up or down strongly.
Here is my source code; can you tell me where I went wrong:
bool get_arcball_vec(double x, double y, glm::vec3& a)
{
glm::vec3 vec = glm::vec3((2.0 * x) / window.getWidth() - 1.0, 1.0 - (2.0 * y) / window.getHeight(), 0.0);
if (glm::length(vec) >= 1.0)
{
vec = glm::normalize(vec);
}
else
{
vec.z = sqrt(1.0 - pow(vec.x, 2.0) - pow(vec.y, 2.0));
}
a = vec;
return true;
}
...
void onMouseMove(double x, double y) {
if (rightMouseButtonPressed) {
glm::vec3 a,b;
cur_mx = x;
cur_my = y;
if (cur_mx != last_mx || cur_my != last_my)
if (get_arcball_vec(last_mx, last_my, a) && get_arcball_vec(cur_mx, cur_my, b))
viewport.getCamera().orbit(a,b);
last_mx = cur_mx;
last_my = cur_my;
...
void Camera::orbit(glm::vec3 a, glm::vec3 b)
{
forward = calcForward();
right = calcRight();
double alpha = acos(glm::min(1.0f, glm::dot(b, a)));
glm::vec3 axis = glm::cross(a, b);
glm::mat4 rotationComponent = glm::mat4(1.0f);
rotationComponent[0] = glm::vec4(right, 0.0f);
rotationComponent[1] = glm::vec4(up, 0.0f);
rotationComponent[2] = glm::vec4(forward, 0.0f);
glm::mat4 toWorldCameraSpace = glm::transpose(rotationComponent);
axis = toWorldCameraSpace * glm::vec4(axis, 1.0);
glm::mat4 orbitMatrix = glm::rotate(glm::mat4(1.0f), (float)alpha, axis);
eye = glm::vec4(target, 1.0) + orbitMatrix * glm::vec4(eye - target, 1.0f);
up = orbitMatrix * glm::vec4(up, 1.0f);
}
I use this code to map the 2D mouse position onto the sphere:
Vector3 GetArcBallVector(const Vector2f & mousePos) {
float radiusSquared = 1.0; //squared radius of the sphere
//compute mouse position from the centre of screen to interval [-half, +half]
Vector3 pt = Vector3(
mousePos.x - halfScreenW,
halfScreenH - mousePos.y,
0.0f
);
//if length squared is smaller than the squared radius,
//the point is inside the sphere
float lengthSqr = pt.x * pt.x + pt.y * pt.y;
if (lengthSqr < radiusSquared){
//inside
pt.z = std::sqrtf(radiusSquared - lengthSqr);
}
else {
pt.z = 0.0f;
}
pt.z *= -1;
return pt;
}
To calculate the rotation, I use the last (startPt) and current (endPt) mapped positions and do:
Quaternion actRot = Quaternion::Identity();
Vector3 axis = Vector3::Cross(endPt, startPt);
if (axis.LengthSquared() > MathUtils::EPSILON) {
float angleCos = Vector3::Dot(endPt, startPt);
actRot = Quaternion(axis.x, axis.y, axis.z, angleCos);
}
I prefer to use quaternions over matrices since they are easy to multiply (for accumulated rotation) and to interpolate (for some smoothing).
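Two practical notes on that construction: the quaternion built from the raw (cross, dot) pair must be normalized before use, and it rotates by twice the angle between the two mapped vectors, which is the classic arcball behavior. A hypothetical usage sketch (Normalize, operator* and ToMatrix are assumed names, not from the snippet above):
Quaternion accumulated = Quaternion::Identity();

void OnDrag(const Vector2f& lastPos, const Vector2f& curPos) {
    Vector3 startPt = GetArcBallVector(lastPos);
    Vector3 endPt = GetArcBallVector(curPos);
    Vector3 axis = Vector3::Cross(endPt, startPt);
    if (axis.LengthSquared() > MathUtils::EPSILON) {
        Quaternion actRot(axis.x, axis.y, axis.z, Vector3::Dot(endPt, startPt));
        actRot.Normalize();                 // required before composing
        accumulated = actRot * accumulated; // accumulate this drag step
    }
    // viewRotation = accumulated.ToMatrix(); // hypothetical conversion
}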

Bad Point Transformation using Matrix (Direct3D)

My problem takes a bit of explaining. I'm trying to draw a sprite using DirectX 11, and to handle scale and rotation I use a transformation matrix. I extracted this code from SFML, an open-source library, and the translation works fine. However, rotation and scale aren't working as expected, as I show next.
When I rotate around the center, all is correct.
When I move the sprite and then rotate, the sprite rotates around an "unknown" point.
The same happens when I scale the sprite: it scales from an "unknown" point.
If I rotate the sprite, the translation axes stay the same but the sprite is rotated, so I can't move it properly.
I leave you here a little video of exactly what's going on. [VIDEO]
I think the problem is related to how I transform the sprite, but I can't be sure. I leave you here the code parts I think are involved in this error, but you can also take a deeper look at the GitHub project. [GITHUB PROJECT]
Transformable.cpp
const Transform& Transformable::GetTransform() const
{
if (_transformNeedUpdate)
{
float angle = -_rotation * 3.141592654f / 180.f;
float cosine = static_cast<float>(std::cos(angle));
float sine = static_cast<float>(std::sin(angle));
float sxc = _scale.x * cosine;
float syc = _scale.y * cosine;
float sxs = _scale.x * sine;
float sys = _scale.y * sine;
float tx = -_origin.x * sxc - _origin.y * sys + _position.x;
float ty = _origin.x * sxs - _origin.y * syc + _position.y;
_transform = Transform(sxc, sys, tx,
-sxs, syc, ty,
0.f, 0.f, 1.f);
_transformNeedUpdate = false;
}
return _transform;
}
Transform.cpp
Transform::Transform(
float a00, float a01, float a02,
float a10, float a11, float a12,
float a20, float a21, float a22
)
{
_matrix[0] = a00; _matrix[4] = a01; _matrix[8] = 0.f; _matrix[12] = a02;
_matrix[1] = a10; _matrix[5] = a11; _matrix[9] = 0.f; _matrix[13] = a12;
_matrix[2] = 0.f; _matrix[6] = 0.f; _matrix[10] = 1.f; _matrix[14] = 0.f;
_matrix[3] = a20; _matrix[7] = a21; _matrix[11] = 0.f; _matrix[15] = a22;
}
D3DXVECTOR2 Transform::TransformPoint(float x, float y) const
{
return D3DXVECTOR2(_matrix[0] * x + _matrix[4] * y + _matrix[12],
_matrix[1] * x + _matrix[5] * y + _matrix[13]);
}
D3DXVECTOR2 operator *(const Transform& left, const D3DXVECTOR2& right)
{
return left.TransformPoint(right);
}
Bitmap.cpp (where I set up the vertices to be drawn)
HRESULT Bitmap::UpdateBuffers(ID3D11DeviceContext* deviceContext)
{
if (_transform == _previousTransform && _bounds == _previousBounds)
{
return S_OK;
}
VertexType* vertices;
D3D11_MAPPED_SUBRESOURCE mappedResource;
VertexType* verticesPtr;
HRESULT result;
_previousTransform = _transform;
_previousBounds = _bounds;
vertices = new VertexType[_vertexCount];
if (!vertices)
{
return CO_E_ERRORINAPP;
}
float left = _bounds.left();
float right = left + _bounds.width();
float top = _bounds.top();
float bottom = top + _bounds.height();
D3DXVECTOR2 topLeft = { left, top };
D3DXVECTOR2 bottomRight = { right, bottom };
D3DXVECTOR2 topRight = { right, top };
D3DXVECTOR2 bottomLeft = { left, bottom };
topLeft = _transform * topLeft;
bottomRight = _transform * bottomRight;
topRight = _transform * topRight;
bottomLeft = _transform * bottomLeft;
vertices[0].position = D3DXVECTOR3(topLeft.x, topLeft.y, 0.0f);
vertices[0].texture = D3DXVECTOR2(0.0f, 0.0f);
vertices[1].position = D3DXVECTOR3(bottomRight.x, bottomRight.y, 0.0f);
vertices[1].texture = D3DXVECTOR2(1.0f, 1.0f);
vertices[2].position = D3DXVECTOR3(bottomLeft.x, bottomLeft.y, 0.0f);
vertices[2].texture = D3DXVECTOR2(0.0f, 1.0f);
vertices[3].position = D3DXVECTOR3(topLeft.x, topLeft.y, 0.0f);
vertices[3].texture = D3DXVECTOR2(0.0f, 0.0f);
vertices[4].position = D3DXVECTOR3(topRight.x, topRight.y, 0.0f);
vertices[4].texture = D3DXVECTOR2(1.0f, 0.0f);
vertices[5].position = D3DXVECTOR3(bottomRight.x, bottomRight.y, 0.0f);
vertices[5].texture = D3DXVECTOR2(1.0f, 1.0f);
result = deviceContext->Map(_vertexBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
if (FAILED(result))
{
return CO_E_ERRORINAPP;
}
verticesPtr = (VertexType*)mappedResource.pData;
memcpy(verticesPtr, (void*)vertices, (sizeof(VertexType) * _vertexCount));
deviceContext->Unmap(_vertexBuffer, 0);
delete[] vertices;
vertices = 0;
return S_OK;
}
The coordinate system is as shown in the image below: the center of the screen is the (0,0) position.
The code flow
When the position, scale, rotation or origin of the actor changes, a new Transform is generated. Then I transform the actor's local bounds with the generated transform to get the correct vertex positions (see the sanity check below), and I set them into the vertex buffer.
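For reference, the matrix GetTransform builds is SFML's standard composition translate(position) * rotate(rotation) * scale(scale) * translate(-origin), so one quick sanity check is that the sprite's origin point must always land exactly on position. A hypothetical snippet (the setter names are assumptions, not from the project):
// Illustrative check: transforming the origin point must yield the position.
Transformable t;
t.SetOrigin(16.f, 16.f);    // assumed setter
t.SetPosition(100.f, 50.f); // assumed setter
D3DXVECTOR2 p = t.GetTransform() * D3DXVECTOR2(16.f, 16.f);
// expected: p == (100, 50); if not, the composition order is off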

C++ Raytracer with OpenGL display skew at specific resolutions

I have a ray tracer (from www.scratchapixel.com) that writes an image to memory, which I then display all at once using OpenGL (GLUT). I use the width and height to divide the screen into an OpenGL point for every pixel. It kind of works.
My problem is that the width has to be between 500 and 799. It cannot be <= 499 or >= 800, which doesn't make sense to me; the image becomes skewed. I have tried it on two computers with the same result.
799x480
800x480
Here's the full code:
#define _USE_MATH_DEFINES
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
// OpenGl
#include "GL/glut.h"
GLuint width = 799, height = 480;
GLdouble width_step = 2.0f / width, height_step = 2.0f / height;
const int MAX_RAY_DEPTH = 3;
const double INFINITY = HUGE_VAL;
template<typename T>
class Vec3
{
public:
T x, y, z;
// Vector constructors.
Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
Vec3(T xx) : x(xx), y(xx), z(xx) {}
Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
// Vector normalisation.
Vec3& normalize()
{
T nor = x * x + y * y + z * z;
if (nor > 1) {
T invNor = 1 / sqrt(nor);
x *= invNor, y *= invNor, z *= invNor;
}
return *this;
}
// Vector operators.
Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
};
template<typename T>
class Sphere
{
public:
// Sphere variables.
Vec3<T> center; /// position of the sphere
T radius, radius2; /// sphere radius and radius^2
Vec3<T> surfaceColor, emissionColor; /// surface color and emission (light)
T transparency, reflection; /// surface transparency and reflectivity
// Sphere constructor.
// position(c), radius(r), surface color(sc), reflectivity(refl), transparency(transp), emission color(ec)
Sphere(const Vec3<T> &c, const T &r, const Vec3<T> &sc,
const T &refl = 0, const T &transp = 0, const Vec3<T> &ec = 0) :
center(c), radius(r), surfaceColor(sc), reflection(refl),
transparency(transp), emissionColor(ec), radius2(r * r)
{}
// compute a ray-sphere intersection using the geometric solution
bool intersect(const Vec3<T> &rayorig, const Vec3<T> &raydir, T *t0 = NULL, T *t1 = NULL) const
{
// we start with a vector (l) from the ray origin (rayorig) to the center of the current sphere.
Vec3<T> l = center - rayorig;
// tca is a length along the direction of the normalised raydir.
// it is stretched using the dot product until it forms a perfect right-angle triangle with the l vector.
T tca = l.dot(raydir);
// if tca is < 0, the raydir is going in the opposite direction. No need to go further. Return false.
if (tca < 0) return false;
// if we keep on into the code, it's because the raydir may still hit the sphere.
// l.dot(l) gives us the l vector length to the power of 2. Then we use Pythagoras' theorem.
// remove the length tca to the power of two (tca * tca) and we get a distance from the center of the sphere to the power of 2 (d2).
T d2 = l.dot(l) - (tca * tca);
// if this distance to the center (d2) is greater than the radius to the power of 2 (radius2), the raydir direction is missing the sphere.
// No need to go further. Return false.
if (d2 > radius2) return false;
// Pythagoras' theorem again: radius2 is the hypotenuse and d2 is one of the sides. Subtraction gives the third side to the power of 2.
// Using sqrt, we obtain the length thc. thc is how deep tca goes into the sphere.
T thc = sqrt(radius2 - d2);
if (t0 != NULL && t1 != NULL) {
// remove thc to tca and you get the length from the ray origin to the surface hit point of the sphere.
*t0 = tca - thc;
// add thc to tca and you get the length from the ray origin to the surface hit point of the back side of the sphere.
*t1 = tca + thc;
}
// There is an intersection with a sphere; t0 and t1 hold the surface distance values. Return true.
return true;
}
};
std::vector<Sphere<double> *> spheres;
// function to mix 2 T variables.
template<typename T>
T mix(const T &a, const T &b, const T &mix)
{
return b * mix + a * (T(1) - mix);
}
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object, it
// returns the color of the object at the intersection point, otherwise it returns
// the background color.
template<typename T>
Vec3<T> trace(const Vec3<T> &rayorig, const Vec3<T> &raydir,
const std::vector<Sphere<T> *> &spheres, const int &depth)
{
T tnear = INFINITY;
const Sphere<T> *sphere = NULL;
// Try to find intersection of this raydir with the spheres in the scene
for (unsigned i = 0; i < spheres.size(); ++i) {
T t0 = INFINITY, t1 = INFINITY;
if (spheres[i]->intersect(rayorig, raydir, &t0, &t1)) {
// is the rayorig inside the sphere (t0 < 0)? If so, use the second hit (t0 = t1)
if (t0 < 0) t0 = t1;
// tnear is the last sphere intersection (or infinity). Is t0 in front of tnear?
if (t0 < tnear) {
// if so, update tnear to this closer t0 and update the closest sphere
tnear = t0;
sphere = spheres[i];
}
}
}
// At this moment in the program, we have the closest sphere (sphere) and the closest hit position (tnear)
// For this pixel, if there's no intersection with a sphere, return a Vec3 with the background color.
if (!sphere) return Vec3<T>(.5); // Grey background color.
// if we keep on with the code, it is because we had an intersection with at least one sphere.
Vec3<T> surfaceColor = 0; // initialisation of the color of the ray/surface of the object intersected by the ray.
Vec3<T> phit = rayorig + (raydir * tnear); // point of intersection.
Vec3<T> nhit = phit - sphere->center; // normal at the intersection point.
// if the normal and the view direction are not opposite to each other,
// reverse the normal direction.
if (raydir.dot(nhit) > 0) nhit = -nhit;
nhit.normalize(); // normalize normal direction
// The angle between raydir and the normal at point hit (not used).
//T s_angle = acos(raydir.dot(nhit)) / ( sqrt(raydir.dot(raydir)) * sqrt(nhit.dot(nhit)));
//T s_incidence = sin(s_angle);
T bias = 1e-5; // add some bias to the point from which we will be tracing
// Do we have transparency or reflection?
if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
T IdotN = raydir.dot(nhit); // raydir.normal
// I and N are not pointing in the same direction, so negate the dot product.
T facingratio = std::max(T(0), -IdotN);
// change the mix value between reflection and refraction to tweak the effect (fresnel effect)
T fresneleffect = mix<T>(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (not need to normalize because all vectors
// are already normalized)
Vec3<T> refldir = raydir - nhit * 2 * raydir.dot(nhit);
Vec3<T> reflection = trace(phit + (nhit * bias), refldir, spheres, depth + 1);
Vec3<T> refraction = 0;
// if the sphere is also transparent compute refraction ray (transmission)
if (sphere->transparency) {
T ior = 1.2, eta = 1 / ior;
T k = 1 - eta * eta * (1 - IdotN * IdotN);
Vec3<T> refrdir = raydir * eta - nhit * (eta * IdotN + sqrt(k));
refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
}
// the result is a mix of reflection and refraction (if the sphere is transparent)
surfaceColor = (reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
}
else {
// it's a diffuse object, no need to raytrace any further
// Look at all sphere to find lights
double shadow = 1.0;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i]->emissionColor.x > 0) {
// this is a light
Vec3<T> transmission = 1.0;
Vec3<T> lightDirection = spheres[i]->center - phit;
lightDirection.normalize();
T light_angle = (acos(raydir.dot(lightDirection)) / ( sqrt(raydir.dot(raydir)) * sqrt(lightDirection.dot(lightDirection))));
T light_incidence = sin(light_angle);
for (unsigned j = 0; j < spheres.size(); ++j) {
if (i != j) {
T t0, t1;
// Does the ray from point hit to the light intersect an object?
// If so, calculate the shadow.
if (spheres[j]->intersect(phit + (nhit * bias), lightDirection, &t0, &t1)) {
shadow = std::max(0.0, shadow - (1.0 - spheres[j]->transparency));
transmission = transmission * spheres[j]->surfaceColor * shadow;
//break;
}
}
}
// For each light found, we add light transmission to the pixel.
surfaceColor += sphere->surfaceColor * transmission *
std::max(T(0), nhit.dot(lightDirection)) * spheres[i]->emissionColor;
}
}
}
return surfaceColor + sphere->emissionColor;
}
// Main rendering function. We compute a camera ray for each pixel of the image,
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
//********************************** OPEN GL ***********************************************
void init(void)
{
/* Select clearing (background) color */
glClearColor(0.0, 0.0, 0.0, 0.0);
glShadeModel(GL_FLAT);
/* Initialize viewing values */
//glMatrixMode(GL_PROJECTION);
gluOrtho2D(0,width,0,height);
}
void advanceDisplay(void)
{
cam_pos.z = cam_pos.z - 2;
glutPostRedisplay();
}
void backDisplay(void)
{
cam_pos.z = cam_pos.z + 2;
glutPostRedisplay();
}
void resetDisplay(void)
{
Vec3<double> new_cam_pos;
new_cam_pos = cam_pos;
cam_pos = new_cam_pos;
glutPostRedisplay();
}
void reshape(int w, int h)
{
glLoadIdentity();
gluOrtho2D(0,width,0,height);
glLoadIdentity();
}
void mouse(int button, int state, int x, int y)
{
switch (button)
{
case GLUT_LEFT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(advanceDisplay);
}
break;
case GLUT_MIDDLE_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(resetDisplay);
}
break;
case GLUT_RIGHT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(backDisplay);
}
break;
}
}
void display(void)
{
int i;
float x, y;
/* clear all pixels */
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
render<double>(spheres); // Creates the image and put it to memory in image[].
i=0;
glBegin(GL_POINTS);
for(y=1.0f;y>-1.0;y=y-height_step)
{
for(x=1.0f;x>-1.0;x=x-width_step)
{
glColor3f((std::min(double(1), image[i].x)),
(std::min(double(1), image[i].y)),
(std::min(double(1), image[i].z)));
glVertex2f(x, y);
if(i < width*height)
{
i = i + 1;
}
}
}
glEnd();
glPopMatrix();
glutSwapBuffers();
}
int main(int argc, char **argv)
{
// position, radius, surface color, reflectivity, transparency, emission color
spheres.push_back(new Sphere<double>(Vec3<double>(0, -10004, -20), 10000, Vec3<double>(0.2), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(3, 0, -15), 2, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(1, -1, -18), 1, Vec3<double>(1.0, 1.0, 1.0), 0.9, 0.9));
spheres.push_back(new Sphere<double>(Vec3<double>(-2, 2, -15), 2, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 3, -18), 1, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 0, -25), 1, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(-1, 1, -25), 2, Vec3<double>(1.0, 1.0, 1.0), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(2, 2, -25), 1, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(5, 3, -25), 2, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
// light
spheres.push_back(new Sphere<double>(Vec3<double>(-10, 20, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(3)));
spheres.push_back(new Sphere<double>(Vec3<double>(0, 10, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(1)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(width, height);
glutInitWindowPosition(10,10);
glutCreateWindow(argv[0]);
init();
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMouseFunc(mouse);
glutMainLoop();
delete [] image;
while (!spheres.empty()) {
Sphere<double> *sph = spheres.back();
spheres.pop_back();
delete sph;
}
return 0;
}
This is where the image is written to memory:
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
This is where I read it back and write it to each OpenGL point:
void display(void)
{
int i;
float x, y;
/* clear all pixels */
glClear(GL_COLOR_BUFFER_BIT);
glPushMatrix();
render<double>(spheres); // Creates the image and put it to memory in image[].
i=0;
glBegin(GL_POINTS);
for(y=1.0f;y>-1.0;y=y-height_step)
{
for(x=1.0f;x>-1.0;x=x-width_step)
{
glColor3f((std::min(double(1), image[i].x)),
(std::min(double(1), image[i].y)),
(std::min(double(1), image[i].z)));
glVertex2f(x, y);
if(i < width*height)
{
i = i + 1;
}
}
}
glEnd();
glPopMatrix();
glutSwapBuffers();
}
I have no idea what is causing this. Is it a bad design? An OpenGL display mode? I don't know.
Is it a bad design?
Yes! Drawing one GL_POINT per pixel at positions accumulated in floating point means rows can gain or lose a point at certain resolutions, letting the linear index i drift out of sync with the rows and shearing the image. Upload your rendered scene to a texture and then render a quad with it instead:
// g++ -O3 main.cpp -lglut -lGL -lGLU
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <fstream>
#include <vector>
#include <iostream>
#include <cassert>
// OpenGl
#include "GL/glut.h"
GLuint width = 800, height = 480;
GLdouble width_step = 2.0f / width;
GLdouble height_step = 2.0f / height;
const int MAX_RAY_DEPTH = 3;
template<typename T>
class Vec3
{
public:
T x, y, z;
// Vector constructors.
Vec3() : x(T(0)), y(T(0)), z(T(0)) {}
Vec3(T xx) : x(xx), y(xx), z(xx) {}
Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {}
// Vector normalisation.
Vec3& normalize()
{
T nor = x * x + y * y + z * z;
if (nor > 1) {
T invNor = 1 / sqrt(nor);
x *= invNor, y *= invNor, z *= invNor;
}
return *this;
}
// Vector operators.
Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); }
Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); }
T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; }
Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); }
Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); }
Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; }
Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; }
Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); }
};
template<typename T>
class Sphere
{
public:
// Sphere variables.
Vec3<T> center; /// position of the sphere
T radius, radius2; /// sphere radius and radius^2
Vec3<T> surfaceColor, emissionColor; /// surface color and emission (light)
T transparency, reflection; /// surface transparency and reflectivity
// Sphere constructor.
// position(c), radius(r), surface color(sc), reflectivity(refl), transparency(transp), emission color(ec)
Sphere(const Vec3<T> &c, const T &r, const Vec3<T> &sc,
const T &refl = 0, const T &transp = 0, const Vec3<T> &ec = 0) :
center(c), radius(r), surfaceColor(sc), reflection(refl),
transparency(transp), emissionColor(ec), radius2(r * r)
{}
// compute a ray-sphere intersection using the geometric solution
bool intersect(const Vec3<T> &rayorig, const Vec3<T> &raydir, T *t0 = NULL, T *t1 = NULL) const
{
// we start with a vector (l) from the ray origin (rayorig) to the center of the current sphere.
Vec3<T> l = center - rayorig;
// tca is a length along the direction of the normalised raydir.
// it is stretched using the dot product until it forms a perfect right-angle triangle with the l vector.
T tca = l.dot(raydir);
// if tca is < 0, the raydir is going in the opposite direction. No need to go further. Return false.
if (tca < 0) return false;
// if we keep on into the code, it's because the raydir may still hit the sphere.
// l.dot(l) gives us the l vector length to the power of 2. Then we use Pythagoras' theorem.
// remove the length tca to the power of two (tca * tca) and we get a distance from the center of the sphere to the power of 2 (d2).
T d2 = l.dot(l) - (tca * tca);
// if this distance to the center (d2) is greater than the radius to the power of 2 (radius2), the raydir direction is missing the sphere.
// No need to go further. Return false.
if (d2 > radius2) return false;
// Pythagoras' theorem again: radius2 is the hypotenuse and d2 is one of the sides. Subtraction gives the third side to the power of 2.
// Using sqrt, we obtain the length thc. thc is how deep tca goes into the sphere.
T thc = sqrt(radius2 - d2);
if (t0 != NULL && t1 != NULL) {
// remove thc to tca and you get the length from the ray origin to the surface hit point of the sphere.
*t0 = tca - thc;
// add thc to tca and you get the length from the ray origin to the surface hit point of the back side of the sphere.
*t1 = tca + thc;
}
// There is an intersection with a sphere; t0 and t1 hold the surface distance values. Return true.
return true;
}
};
std::vector<Sphere<double> *> spheres;
// function to mix 2 T variables.
template<typename T>
T mix(const T &a, const T &b, const T &mix)
{
return b * mix + a * (T(1) - mix);
}
// This is the main trace function. It takes a ray as argument (defined by its origin
// and direction). We test if this ray intersects any of the geometry in the scene.
// If the ray intersects an object, we compute the intersection point, the normal
// at the intersection point, and shade this point using this information.
// Shading depends on the surface property (is it transparent, reflective, diffuse).
// The function returns a color for the ray. If the ray intersects an object, it
// returns the color of the object at the intersection point, otherwise it returns
// the background color.
template<typename T>
Vec3<T> trace(const Vec3<T> &rayorig, const Vec3<T> &raydir,
const std::vector<Sphere<T> *> &spheres, const int &depth)
{
T tnear = INFINITY;
const Sphere<T> *sphere = NULL;
// Try to find intersection of this raydir with the spheres in the scene
for (unsigned i = 0; i < spheres.size(); ++i) {
T t0 = INFINITY, t1 = INFINITY;
if (spheres[i]->intersect(rayorig, raydir, &t0, &t1)) {
// is the rayorig inside the sphere (t0 < 0)? If so, use the second hit (t0 = t1)
if (t0 < 0) t0 = t1;
// tnear is the last sphere intersection (or infinity). Is t0 in front of tnear?
if (t0 < tnear) {
// if so, update tnear to this closer t0 and update the closest sphere
tnear = t0;
sphere = spheres[i];
}
}
}
// At this moment in the program, we have the closest sphere (sphere) and the closest hit position (tnear)
// For this pixel, if there's no intersection with a sphere, return a Vec3 with the background color.
if (!sphere) return Vec3<T>(.5); // Grey background color.
// if we keep on with the code, it is because we had an intersection with at least one sphere.
Vec3<T> surfaceColor = 0; // initialisation of the color of the ray/surface of the object intersected by the ray.
Vec3<T> phit = rayorig + (raydir * tnear); // point of intersection.
Vec3<T> nhit = phit - sphere->center; // normal at the intersection point.
// if the normal and the view direction are not opposite to each other,
// reverse the normal direction.
if (raydir.dot(nhit) > 0) nhit = -nhit;
nhit.normalize(); // normalize normal direction
// The angle between raydir and the normal at point hit (not used).
//T s_angle = acos(raydir.dot(nhit)) / ( sqrt(raydir.dot(raydir)) * sqrt(nhit.dot(nhit)));
//T s_incidence = sin(s_angle);
T bias = 1e-5; // add some bias to the point from which we will be tracing
// Do we have transparency or reflection?
if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) {
T IdotN = raydir.dot(nhit); // raydir.normal
// I and N are not pointing in the same direction, so negate the dot product.
T facingratio = std::max(T(0), -IdotN);
// change the mix value between reflection and refraction to tweak the effect (fresnel effect)
T fresneleffect = mix<T>(pow(1 - facingratio, 3), 1, 0.1);
// compute reflection direction (not need to normalize because all vectors
// are already normalized)
Vec3<T> refldir = raydir - nhit * 2 * raydir.dot(nhit);
Vec3<T> reflection = trace(phit + (nhit * bias), refldir, spheres, depth + 1);
Vec3<T> refraction = 0;
// if the sphere is also transparent compute refraction ray (transmission)
if (sphere->transparency) {
T ior = 1.2, eta = 1 / ior;
T k = 1 - eta * eta * (1 - IdotN * IdotN);
Vec3<T> refrdir = raydir * eta - nhit * (eta * IdotN + sqrt(k));
refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
}
// the result is a mix of reflection and refraction (if the sphere is transparent)
surfaceColor = (reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
}
else {
// it's a diffuse object, no need to raytrace any further
// Look at all sphere to find lights
double shadow = 1.0;
for (unsigned i = 0; i < spheres.size(); ++i) {
if (spheres[i]->emissionColor.x > 0) {
// this is a light
Vec3<T> transmission = 1.0;
Vec3<T> lightDirection = spheres[i]->center - phit;
lightDirection.normalize();
T light_angle = (acos(raydir.dot(lightDirection)) / ( sqrt(raydir.dot(raydir)) * sqrt(lightDirection.dot(lightDirection))));
T light_incidence = sin(light_angle);
for (unsigned j = 0; j < spheres.size(); ++j) {
if (i != j) {
T t0, t1;
// Does the ray from point hit to the light intersect an object?
// If so, calculate the shadow.
if (spheres[j]->intersect(phit + (nhit * bias), lightDirection, &t0, &t1)) {
shadow = std::max(0.0, shadow - (1.0 - spheres[j]->transparency));
transmission = transmission * spheres[j]->surfaceColor * shadow;
//break;
}
}
}
// For each light found, we add light transmission to the pixel.
surfaceColor += sphere->surfaceColor * transmission *
std::max(T(0), nhit.dot(lightDirection)) * spheres[i]->emissionColor;
}
}
}
return surfaceColor + sphere->emissionColor;
}
// Main rendering function. We compute a camera ray for each pixel of the image,
// trace it and return a color. If the ray hits a sphere, we return the color of the
// sphere at the intersection point, else we return the background color.
Vec3<double> *image = new Vec3<double>[width * height];
static Vec3<double> cam_pos = Vec3<double>(0);
template<typename T>
void render(const std::vector<Sphere<T> *> &spheres)
{
Vec3<T> *pixel = image;
T invWidth = 1 / T(width), invHeight = 1 / T(height);
T fov = 30, aspectratio = T(width) / T(height);
T angle = tan(M_PI * 0.5 * fov / T(180));
// Trace rays
for (GLuint y = 0; y < height; ++y) {
for (GLuint x = 0; x < width; ++x, ++pixel) {
T xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
T yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
Vec3<T> raydir(xx, yy, -1);
raydir.normalize();
*pixel = trace(cam_pos, raydir, spheres, 0);
}
}
}
//********************************** OPEN GL ***********************************************
void advanceDisplay(void)
{
cam_pos.z = cam_pos.z - 2;
glutPostRedisplay();
}
void backDisplay(void)
{
cam_pos.z = cam_pos.z + 2;
glutPostRedisplay();
}
void resetDisplay(void)
{
Vec3<double> new_cam_pos;
new_cam_pos = cam_pos;
cam_pos = new_cam_pos;
glutPostRedisplay();
}
void mouse(int button, int state, int x, int y)
{
switch (button)
{
case GLUT_LEFT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(advanceDisplay);
}
break;
case GLUT_MIDDLE_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(resetDisplay);
}
break;
case GLUT_RIGHT_BUTTON:
if(state == GLUT_DOWN)
{
glutIdleFunc(backDisplay);
}
break;
}
}
GLuint tex = 0;
void display(void)
{
int i;
float x, y;
render<double>(spheres); // Creates the image and put it to memory in image[].
std::vector< unsigned char > buf;
buf.reserve( width * height * 3 );
for( size_t y = 0; y < height; ++y )
{
for( size_t x = 0; x < width; ++x )
{
// flip vertically (height-1-y) because the OpenGL texture origin is in the lower-left corner
// flip horizontally (width-1-x) because...the original code did so
// (the -1 terms keep the index inside the image array)
size_t i = (height-1-y) * width + (width-1-x);
buf.push_back( (unsigned char)( std::min(double(1), image[i].x) * 255.0 ) );
buf.push_back( (unsigned char)( std::min(double(1), image[i].y) * 255.0 ) );
buf.push_back( (unsigned char)( std::min(double(1), image[i].z) * 255.0 ) );
}
}
/* clear all pixels */
glClearColor(0.0, 0.0, 0.0, 0.0);
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
glEnable( GL_TEXTURE_2D );
glBindTexture( GL_TEXTURE_2D, tex );
glTexSubImage2D
(
GL_TEXTURE_2D, 0,
0, 0,
width, height,
GL_RGB,
GL_UNSIGNED_BYTE,
&buf[0]
);
glBegin( GL_QUADS );
glTexCoord2i( 0, 0 );
glVertex2i( -1, -1 );
glTexCoord2i( 1, 0 );
glVertex2i( 1, -1 );
glTexCoord2i( 1, 1 );
glVertex2i( 1, 1 );
glTexCoord2i( 0, 1 );
glVertex2i( -1, 1 );
glEnd();
glutSwapBuffers();
}
int main(int argc, char **argv)
{
// position, radius, surface color, reflectivity, transparency, emission color
spheres.push_back(new Sphere<double>(Vec3<double>(0, -10004, -20), 10000, Vec3<double>(0.2), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(3, 0, -15), 2, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(1, -1, -18), 1, Vec3<double>(1.0, 1.0, 1.0), 0.9, 0.9));
spheres.push_back(new Sphere<double>(Vec3<double>(-2, 2, -15), 2, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 3, -18), 1, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
spheres.push_back(new Sphere<double>(Vec3<double>(-4, 0, -25), 1, Vec3<double>(1.00, 0.1, 0.1), 0.65, 0.95));
spheres.push_back(new Sphere<double>(Vec3<double>(-1, 1, -25), 2, Vec3<double>(1.0, 1.0, 1.0), 0.0, 0.0));
spheres.push_back(new Sphere<double>(Vec3<double>(2, 2, -25), 1, Vec3<double>(0.1, 0.1, 1.0), 0.05, 0.5));
spheres.push_back(new Sphere<double>(Vec3<double>(5, 3, -25), 2, Vec3<double>(0.1, 1.0, 0.1), 0.3, 0.7));
// light
spheres.push_back(new Sphere<double>(Vec3<double>(-10, 20, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(3)));
spheres.push_back(new Sphere<double>(Vec3<double>(0, 10, 0), 3, Vec3<double>(0), 0, 0, Vec3<double>(1)));
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
glutInitWindowSize(width, height);
glutInitWindowPosition(10,10);
glutCreateWindow(argv[0]);
glutDisplayFunc(display);
glutMouseFunc(mouse);
glGenTextures( 1, &tex );
glBindTexture( GL_TEXTURE_2D, tex );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
glTexImage2D( GL_TEXTURE_2D, 0, 3, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL );
glutMainLoop();
delete [] image;
while (!spheres.empty()) {
Sphere<double> *sph = spheres.back();
spheres.pop_back();
delete sph;
}
return 0;
}
How to load and display images is also explained on www.scratchapixel.com. Strange that you didn't see this lesson:
http://www.scratchapixel.com/lessons/3d-basic-lessons/lesson-5-colors-and-digital-images/source-code/
It's all in there, and they explain how to display images using GL textures.
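For a quick one-off blit in legacy GL you could also skip the texture entirely and use glDrawPixels; a minimal sketch, assuming the same tightly packed buf as above (the texture approach stays the more flexible one):
// Alternative blit of the same RGB buffer (legacy GL only).
glClear(GL_COLOR_BUFFER_BIT);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // rows are not 4-byte aligned
glRasterPos2i(-1, -1);                 // lower-left corner of the viewport
glDrawPixels(width, height, GL_RGB, GL_UNSIGNED_BYTE, &buf[0]);
glutSwapBuffers();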

Find mouse world-coordinates (3D) on a quadtree heightmapped terrain

I'm trying to find the mouse position in world coordinates but am having trouble finding the right code. At the moment I use this to determine the ray:
float pointX, pointY;
D3DXMATRIX projectionMatrix, viewMatrix, inverseViewMatrix, worldMatrix, translateMatrix, inverseWorldMatrix;
D3DXVECTOR3 direction, origin, rayOrigin, rayDirection;
bool intersect, result;
// Move the mouse cursor coordinates into the -1 to +1 range.
pointX = ((2.0f * (float)mouseX) / (float)m_screenWidth) - 1.0f;
pointY = (((2.0f * (float)mouseY) / (float)m_screenHeight) - 1.0f) * -1.0f;
// Adjust the points using the projection matrix to account for the aspect ratio of the viewport.
m_Direct3D->GetProjectionMatrix(projectionMatrix);
pointX = pointX / projectionMatrix._11;
pointY = pointY / projectionMatrix._22;
// Get the inverse of the view matrix.
m_Camera->GetViewMatrix(viewMatrix);
D3DXMatrixInverse(&inverseViewMatrix, NULL, &viewMatrix);
// Calculate the direction of the picking ray in view space.
direction.x = (pointX * inverseViewMatrix._11) + (pointY * inverseViewMatrix._21) + inverseViewMatrix._31;
direction.y = (pointX * inverseViewMatrix._12) + (pointY * inverseViewMatrix._22) + inverseViewMatrix._32;
direction.z = (pointX * inverseViewMatrix._13) + (pointY * inverseViewMatrix._23) + inverseViewMatrix._33;
// Get the origin of the picking ray which is the position of the camera.
origin = m_Camera->GetPosition();
This gives me the origin and direction of the ray.
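As a side note, D3DX can build the same pick ray by unprojecting the cursor at the near and far planes; a sketch of that alternative (the viewport variable is an assumption, not from the original code):
// Alternative pick-ray construction via D3DXVec3Unproject (illustrative).
D3DXVECTOR3 screenNear((float)mouseX, (float)mouseY, 0.0f); // near plane
D3DXVECTOR3 screenFar((float)mouseX, (float)mouseY, 1.0f);  // far plane
D3DXMATRIX identity;
D3DXMatrixIdentity(&identity);
D3DXVECTOR3 nearPt, farPt;
D3DXVec3Unproject(&nearPt, &screenNear, &viewport, &projectionMatrix, &viewMatrix, &identity);
D3DXVec3Unproject(&farPt, &screenFar, &viewport, &projectionMatrix, &viewMatrix, &identity);
D3DXVECTOR3 rayOrigin = nearPt;
D3DXVECTOR3 rayDirection = farPt - nearPt;
D3DXVec3Normalize(&rayDirection, &rayDirection);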
But...
I use a custom mesh (not the one from DirectX) with a heightmap, separated into a quadtree, and I don't know if my logic is correct. I tried using the frustum to determine which quadtree nodes are visible and to check triangle intersections only on those nodes. Here is that code:
Note: m_mousepos is a vector.
bool QuadTreeClass::getTriangleRay(NodeType* node, FrustumClass* frustum, ID3D10Device* device, D3DXVECTOR3 vPickRayDir, D3DXVECTOR3 vPickRayOrig){
bool result;
int count, i, j, indexCount;
unsigned int stride, offset;
float fBary1, fBary2;
float fDist;
D3DXVECTOR3 v0, v1, v2;
float p1, p2, p3;
// Check to see if the node can be viewed.
result = frustum->CheckCube(node->positionX, 0.0f, node->positionZ, (node->width / 2.0f));
if(!result)
{
return false;
}
// If it can be seen then check all four child nodes to see if they can also be seen.
count = 0;
for(i=0; i<4; i++)
{
if(node->nodes[i] != 0)
{
count++;
getTriangleRay(node->nodes[i], frustum, device, vPickRayOrig, vPickRayDir);
}
}
// If there were any children nodes then dont continue
if(count != 0)
{
return false;
}
// Now intersect each triangle in this node
j = 0;
for(i=0; i<node->triangleCount; i++){
j = i * 3;
v0 = D3DXVECTOR3( node->vertexArray[j].x, node->vertexArray[j].y, node->vertexArray[j].z);
j++;
v1 = D3DXVECTOR3( node->vertexArray[j].x, node->vertexArray[j].y, node->vertexArray[j].z);
j++;
v2 = D3DXVECTOR3( node->vertexArray[j].x, node->vertexArray[j].y, node->vertexArray[j].z);
result = IntersectTriangle( vPickRayOrig, vPickRayDir, v0, v1, v2, &fDist, &fBary1, &fBary2);
if(result == true){
// intersection = true, so get a aproximate center of the triangle on the world
p1 = (v0.x + v0.x + v0.x)/3;
p2 = (v0.y + v1.y + v2.y)/3;
p3 = (v0.z + v1.z + v2.z)/3;
m_mousepos = D3DXVECTOR3(p1, p2, p3);
return true;
}
}
}
bool QuadTreeClass::IntersectTriangle( const D3DXVECTOR3& orig, const D3DXVECTOR3& dir,D3DXVECTOR3& v0, D3DXVECTOR3& v1, D3DXVECTOR3& v2, FLOAT* t, FLOAT* u, FLOAT* v ){
// Find vectors for two edges sharing vert0
D3DXVECTOR3 edge1 = v1 - v0;
D3DXVECTOR3 edge2 = v2 - v0;
// Begin calculating determinant - also used to calculate U parameter
D3DXVECTOR3 pvec;
D3DXVec3Cross( &pvec, &dir, &edge2 );
// If determinant is near zero, ray lies in plane of triangle
FLOAT det = D3DXVec3Dot( &edge1, &pvec );
D3DXVECTOR3 tvec;
if( det > 0 )
{
tvec = orig - v0;
}
else
{
tvec = v0 - orig;
det = -det;
}
if( det < 0.0001f )
return FALSE;
// Calculate U parameter and test bounds
*u = D3DXVec3Dot( &tvec, &pvec );
if( *u < 0.0f || *u > det )
return FALSE;
// Prepare to test V parameter
D3DXVECTOR3 qvec;
D3DXVec3Cross( &qvec, &tvec, &edge1 );
// Calculate V parameter and test bounds
*v = D3DXVec3Dot( &dir, &qvec );
if( *v < 0.0f || *u + *v > det )
return FALSE;
// Calculate t, scale parameters, ray intersects triangle
*t = D3DXVec3Dot( &edge2, &qvec );
FLOAT fInvDet = 1.0f / det;
*t *= fInvDet;
*u *= fInvDet;
*v *= fInvDet;
return TRUE;
}
Please, is this code right? If it is, then my problem must be related to the quadtree.
Thanks!
Iterating over all visible triangles to find the intersection is very expensive, and the cost rises as your heightmap gets finer.
For my heightmap I use a different approach:
I do a step-by-step search along the click ray, starting at its origin. At every step the current position is moved along the ray and tested against the height of the heightmap (for this you need a height function). If the current position falls below the heightmap, the last interval is searched again with an additional, finer iteration to narrow down the hit point. This works as long as the height values don't vary at too high a frequency relative to the step size (otherwise you could jump over a peak). A sketch of the idea follows.
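A minimal sketch of that search, assuming a user-supplied getHeight(x, z) that samples the heightmap (the function name and constants are placeholders):
// Coarse march along the ray, then bisect the bracketing interval.
bool RayMarchHeightmap(const D3DXVECTOR3& origin, const D3DXVECTOR3& dir,
    float maxDist, float step, D3DXVECTOR3& hit)
{
    D3DXVECTOR3 prev = origin;
    for (float t = step; t <= maxDist; t += step)
    {
        D3DXVECTOR3 pos = origin + t * dir;
        if (pos.y < getHeight(pos.x, pos.z)) // dropped below the terrain
        {
            D3DXVECTOR3 lo = prev, hi = pos;
            for (int i = 0; i < 16; ++i)     // refine by bisection
            {
                D3DXVECTOR3 mid = (lo + hi) * 0.5f;
                if (mid.y < getHeight(mid.x, mid.z)) hi = mid; else lo = mid;
            }
            hit = hi;
            return true;
        }
        prev = pos;
    }
    return false;
}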