DirectX11 Billboard not showing - c++

I'm trying billboarding; however, the billboard doesn't show when I use 'newWorld'. I've tested whether it was the way I was drawing the billboard, but it isn't: when I switched to its own local world matrix, it appeared on screen.
// Calculate the rotation that needs to be applied to the billboard model to face the current camera position using the arc tangent function.
XMFLOAT3 CarPos = XMFLOAT3(Billboards->GetPosition().x, Billboards->GetPosition().y, Billboards->GetPosition().z);
XMFLOAT3 CamPos = XMFLOAT3(fCam->GetEye().x, fCam->GetEye().y, fCam->GetEye().z);
float angle;
float BBrotation;
XMMATRIX newWorld = XMLoadFloat4x4(&Billboards->GetWorld());
angle = atan2(CarPos.x - CamPos.x, CarPos.z - CamPos.z) * (180 / XM_PI);
BBrotation = angle * 0.0174532925f;
XMMatrixRotationY(BBrotation);
XMMatrixTranslation(CarPos.x, CarPos.y, CarPos.z);
XMMATRIX bbRotMatix = XMMatrixRotationY(BBrotation);
XMMATRIX bbCarPos = XMMatrixTranslation(CarPos.x, CarPos.y, CarPos.z);
newWorld = bbCarPos + bbRotMatix;
XMMATRIX bilworld = XMLoadFloat4x4(&Billboards->GetWorld());
cb.World = XMMatrixTranspose(newWorld);
_pImmediateContext->UpdateSubresource(_pConstantBuffer, 0, nullptr, &cb, 0, 0);
Billboards->Draw(_pd3dDevice, _pImmediateContext, _pTreeRV, _pSpecRV);
Billboards->SetTranslation(30.0f, 0.0f, 0.0f);
Billboards->SetRotation(-90.0f, 0.0f, 0.0);

The SimpleMath wrapper for DirectXMath in the DirectX Tool Kit has right-handed billboard functions you might find useful as a reference.
inline Matrix Matrix::CreateBillboard( const Vector3& object, const Vector3& cameraPosition, const Vector3& cameraUp, const Vector3* cameraForward )
{
    using namespace DirectX;
    XMVECTOR O = XMLoadFloat3( &object );
    XMVECTOR C = XMLoadFloat3( &cameraPosition );
    XMVECTOR Z = XMVectorSubtract( O, C );

    XMVECTOR N = XMVector3LengthSq( Z );
    if ( XMVector3Less( N, g_XMEpsilon ) )
    {
        if ( cameraForward )
        {
            XMVECTOR F = XMLoadFloat3( cameraForward );
            Z = XMVectorNegate( F );
        }
        else
            Z = g_XMNegIdentityR2;
    }
    else
    {
        Z = XMVector3Normalize( Z );
    }

    XMVECTOR up = XMLoadFloat3( &cameraUp );
    XMVECTOR X = XMVector3Cross( up, Z );
    X = XMVector3Normalize( X );

    XMVECTOR Y = XMVector3Cross( Z, X );

    XMMATRIX M;
    M.r[0] = X;
    M.r[1] = Y;
    M.r[2] = Z;
    M.r[3] = XMVectorSetW( O, 1.f );

    Matrix R;
    XMStoreFloat4x4( &R, M );
    return R;
}
inline Matrix Matrix::CreateConstrainedBillboard( const Vector3& object, const Vector3& cameraPosition, const Vector3& rotateAxis,
                                                  const Vector3* cameraForward, const Vector3* objectForward )
{
    using namespace DirectX;

    static const XMVECTORF32 s_minAngle = { 0.99825467075f, 0.99825467075f, 0.99825467075f, 0.99825467075f }; // 1.0 - XMConvertToRadians( 0.1f );

    XMVECTOR O = XMLoadFloat3( &object );
    XMVECTOR C = XMLoadFloat3( &cameraPosition );
    XMVECTOR faceDir = XMVectorSubtract( O, C );

    XMVECTOR N = XMVector3LengthSq( faceDir );
    if ( XMVector3Less( N, g_XMEpsilon ) )
    {
        if ( cameraForward )
        {
            XMVECTOR F = XMLoadFloat3( cameraForward );
            faceDir = XMVectorNegate( F );
        }
        else
            faceDir = g_XMNegIdentityR2;
    }
    else
    {
        faceDir = XMVector3Normalize( faceDir );
    }

    XMVECTOR Y = XMLoadFloat3( &rotateAxis );
    XMVECTOR X, Z;

    XMVECTOR dot = XMVectorAbs( XMVector3Dot( Y, faceDir ) );
    if ( XMVector3Greater( dot, s_minAngle ) )
    {
        if ( objectForward )
        {
            Z = XMLoadFloat3( objectForward );
            dot = XMVectorAbs( XMVector3Dot( Y, Z ) );
            if ( XMVector3Greater( dot, s_minAngle ) )
            {
                dot = XMVectorAbs( XMVector3Dot( Y, g_XMNegIdentityR2 ) );
                Z = ( XMVector3Greater( dot, s_minAngle ) ) ? g_XMIdentityR0 : g_XMNegIdentityR2;
            }
        }
        else
        {
            dot = XMVectorAbs( XMVector3Dot( Y, g_XMNegIdentityR2 ) );
            Z = ( XMVector3Greater( dot, s_minAngle ) ) ? g_XMIdentityR0 : g_XMNegIdentityR2;
        }

        X = XMVector3Cross( Y, Z );
        X = XMVector3Normalize( X );

        Z = XMVector3Cross( X, Y );
        Z = XMVector3Normalize( Z );
    }
    else
    {
        X = XMVector3Cross( Y, faceDir );
        X = XMVector3Normalize( X );

        Z = XMVector3Cross( X, Y );
        Z = XMVector3Normalize( Z );
    }

    XMMATRIX M;
    M.r[0] = X;
    M.r[1] = Y;
    M.r[2] = Z;
    M.r[3] = XMVectorSetW( O, 1.f );

    Matrix R;
    XMStoreFloat4x4( &R, M );
    return R;
}
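For the question's code specifically, the likely culprit is newWorld = bbCarPos + bbRotMatix: operator+ on XMMATRIX adds the two matrices element-wise, it does not compose the transforms, so the result is not a valid world matrix. Below is a minimal, untested sketch of the two usual alternatives, reusing the question's variables (CarPos, CamPos, BBrotation, cb) and assuming, for the second variant, that the DirectX Tool Kit headers are available:

// Variant 1: compose the transforms by multiplication
// (rotate first, then translate, in DirectXMath's row-vector convention).
XMMATRIX bbRot    = XMMatrixRotationY(BBrotation);
XMMATRIX bbTrans  = XMMatrixTranslation(CarPos.x, CarPos.y, CarPos.z);
XMMATRIX newWorld = XMMatrixMultiply(bbRot, bbTrans);
cb.World = XMMatrixTranspose(newWorld);

// Variant 2: let SimpleMath build the whole facing matrix in one call
// (camera up assumed to be +Y here).
using namespace DirectX::SimpleMath;
Matrix bb = Matrix::CreateBillboard(
    Vector3(CarPos.x, CarPos.y, CarPos.z),   // object (billboard) position
    Vector3(CamPos.x, CamPos.y, CamPos.z),   // camera position
    Vector3::UnitY);                         // camera up
cb.World = XMMatrixTranspose(bb);            // Matrix converts implicitly to XMMATRIX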

Related

OpenGl: Problem with Arcball Camera Rotation

I need to implement an arcball camera. I got something similar, but it works very erratically (the angle changes sharply; when turning right/left, the camera moves up/down strongly).
Here is my source code, can you tell me where I went wrong?
bool get_arcball_vec(double x, double y, glm::vec3& a)
{
    glm::vec3 vec = glm::vec3((2.0 * x) / window.getWidth() - 1.0, 1.0 - (2.0 * y) / window.getHeight(), 0.0);
    if (glm::length(vec) >= 1.0)
    {
        vec = glm::normalize(vec);
    }
    else
    {
        vec.z = sqrt(1.0 - pow(vec.x, 2.0) - pow(vec.y, 2.0));
    }
    a = vec;
    return true;
}
...
void onMouseMove(double x, double y) {
    if (rightMouseButtonPressed) {
        glm::vec3 a, b;
        cur_mx = x;
        cur_my = y;
        if (cur_mx != last_mx || cur_my != last_my)
            if (get_arcball_vec(last_mx, last_my, a) && get_arcball_vec(cur_mx, cur_my, b))
                viewport.getCamera().orbit(a, b);
        last_mx = cur_mx;
        last_my = cur_my;
...
void Camera::orbit(glm::vec3 a, glm::vec3 b)
{
    forward = calcForward();
    right = calcRight();

    double alpha = acos(glm::min(1.0f, glm::dot(b, a)));
    glm::vec3 axis = glm::cross(a, b);

    glm::mat4 rotationComponent = glm::mat4(1.0f);
    rotationComponent[0] = glm::vec4(right, 0.0f);
    rotationComponent[1] = glm::vec4(up, 0.0f);
    rotationComponent[2] = glm::vec4(forward, 0.0f);
    glm::mat4 toWorldCameraSpace = glm::transpose(rotationComponent);
    axis = toWorldCameraSpace * glm::vec4(axis, 1.0);

    glm::mat4 orbitMatrix = glm::rotate(glm::mat4(1.0f), (float)alpha, axis);
    eye = glm::vec4(target, 1.0) + orbitMatrix * glm::vec4(eye - target, 1.0f);
    up = orbitMatrix * glm::vec4(up, 1.0f);
}
I use this code to map 2D mouse position to the sphere:
Vector3 GetArcBallVector(const Vector2f & mousePos) {
    float radiusSquared = 1.0; //squared radius of the sphere

    //compute mouse position from the centre of screen to interval [-half, +half]
    Vector3 pt = Vector3(
        mousePos.x - halfScreenW,
        halfScreenH - mousePos.y,
        0.0f
    );

    //if length squared is smaller than sphere diameter
    //point is inside
    float lengthSqr = pt.x * pt.x + pt.y * pt.y;
    if (lengthSqr < radiusSquared){
        //inside
        pt.z = std::sqrtf(radiusSquared - lengthSqr);
    }
    else {
        pt.z = 0.0f;
    }

    pt.z *= -1;
    return pt;
}
To calculate rotation, I use the last (startPt) and current (endPt) mapped position and do:
Quaternion actRot = Quaternion::Identity();
Vector3 axis = Vector3::Cross(endPt, startPt);
if (axis.LengthSquared() > MathUtils::EPSILON) {
    float angleCos = Vector3::Dot(endPt, startPt);
    actRot = Quaternion(axis.x, axis.y, axis.z, angleCos);
}
I prefer to use Quaternions over matrices since they are easy to multiply (for accumulated rotation) and interpolate (for some smoothing).
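As an illustration of that last point, here is a small sketch (not from the original answer) of accumulating the per-frame rotation with GLM quaternions, where startPt and endPt stand for the two mapped arcball vectors:

#include <cmath>
#include <glm/glm.hpp>
#include <glm/gtc/quaternion.hpp>

// Accumulated arcball orientation (identity = no rotation so far).
glm::quat g_orientation(1.0f, 0.0f, 0.0f, 0.0f);

// Apply the incremental rotation between two mapped arcball vectors.
void AccumulateArcball(const glm::vec3& startPt, const glm::vec3& endPt)
{
    glm::vec3 axis = glm::cross(startPt, endPt);
    if (glm::dot(axis, axis) < 1e-8f)
        return; // no meaningful rotation this frame

    float angle = std::acos(glm::clamp(glm::dot(startPt, endPt), -1.0f, 1.0f));
    glm::quat delta = glm::angleAxis(angle, glm::normalize(axis));

    // Quaternion multiplication accumulates the rotation; renormalize to
    // keep numeric drift in check.
    g_orientation = glm::normalize(delta * g_orientation);
}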

c++: Find the closest point on a mesh to a query point

Here is what I need:
Given a point (x, y, z) in 3D space and a mesh composed of vertices (x, y, z), calculate and return the coordinates of the closest point on that mesh.
The function would probably look like this:
bool closePointOnMesh(const Point& queryPoint, const Mesh& myMesh, float maxDistance);
I have done some searching, and I will probably choose an octree to reduce the calculation.
But there are still many details that I can't understand:
1: How are the octree nodes subdivided so that each node contains zero or more triangles? It would be easier to subdivide a cell further based on vertices and just store the vertices directly.
2: How does the octree structure help reduce the calculation? I know that if a cell is empty I can just disregard it, but do I need to compute the closest point to the query point on every triangle face in an octree cell to finally get the closest point of all? That still sounds heavy. Besides, wouldn't it be easier to just iterate through all the triangles and take the closest point among them, which would mean no need for the octree?
3: Is there a fast way to get the closest point on a triangle face to a given point?
4: How does the maxDistance limit help reduce the calculation?
For #3, here's some code that computes the closest point on a triangle. It projects the point onto the triangle's plane, clamps the barycentric coordinates to [0, 1], and uses those values to compute the closest point.
Copied below:
// Assumes the poster's math library: a vector3 type with dot() and arithmetic
// operators, and a clamp(value, min, max) helper.
vector3 closestPointOnTriangle( const vector3 *triangle, const vector3 &sourcePosition )
{
    vector3 edge0 = triangle[1] - triangle[0];
    vector3 edge1 = triangle[2] - triangle[0];
    vector3 v0 = triangle[0] - sourcePosition;

    float a = edge0.dot( edge0 );
    float b = edge0.dot( edge1 );
    float c = edge1.dot( edge1 );
    float d = edge0.dot( v0 );
    float e = edge1.dot( v0 );

    float det = a*c - b*b;
    float s = b*e - c*d;
    float t = b*d - a*e;

    if ( s + t < det )
    {
        if ( s < 0.f )
        {
            if ( t < 0.f )
            {
                if ( d < 0.f )
                {
                    s = clamp( -d/a, 0.f, 1.f );
                    t = 0.f;
                }
                else
                {
                    s = 0.f;
                    t = clamp( -e/c, 0.f, 1.f );
                }
            }
            else
            {
                s = 0.f;
                t = clamp( -e/c, 0.f, 1.f );
            }
        }
        else if ( t < 0.f )
        {
            s = clamp( -d/a, 0.f, 1.f );
            t = 0.f;
        }
        else
        {
            float invDet = 1.f / det;
            s *= invDet;
            t *= invDet;
        }
    }
    else
    {
        if ( s < 0.f )
        {
            float tmp0 = b+d;
            float tmp1 = c+e;
            if ( tmp1 > tmp0 )
            {
                float numer = tmp1 - tmp0;
                float denom = a-2*b+c;
                s = clamp( numer/denom, 0.f, 1.f );
                t = 1-s;
            }
            else
            {
                t = clamp( -e/c, 0.f, 1.f );
                s = 0.f;
            }
        }
        else if ( t < 0.f )
        {
            if ( a+d > b+e )
            {
                float numer = c+e-b-d;
                float denom = a-2*b+c;
                s = clamp( numer/denom, 0.f, 1.f );
                t = 1-s;
            }
            else
            {
                s = clamp( -e/c, 0.f, 1.f );
                t = 0.f;
            }
        }
        else
        {
            float numer = c+e-b-d;
            float denom = a-2*b+c;
            s = clamp( numer/denom, 0.f, 1.f );
            t = 1.f - s;
        }
    }

    return triangle[0] + s * edge0 + t * edge1;
}
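For #2 and #4: the octree helps because an entire cell can be skipped when the query point is already farther from the cell's bounding box than the best distance found so far (which starts at maxDistance). A rough sketch of that pruning test, assuming an axis-aligned bounding box per cell:

struct Vec3 { float x, y, z; };
struct AABB { Vec3 min, max; };

// Squared distance from a point to an axis-aligned box (0 if the point is inside).
float DistSqPointAABB(const Vec3& p, const AABB& box)
{
    auto axisDist = [](float v, float lo, float hi) {
        float d = 0.0f;
        if (v < lo)      d = lo - v;
        else if (v > hi) d = v - hi;
        return d * d;
    };
    return axisDist(p.x, box.min.x, box.max.x) +
           axisDist(p.y, box.min.y, box.max.y) +
           axisDist(p.z, box.min.z, box.max.z);
}

// During traversal: descend into a cell only if it can still beat the current best.
// bestDistSq starts at maxDistance * maxDistance and shrinks whenever a closer
// triangle is found, so later cells are pruned more and more aggressively.
bool ShouldVisitCell(const Vec3& query, const AABB& cellBox, float bestDistSq)
{
    return DistSqPointAABB(query, cellBox) < bestDistSq;
}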

OpenGL Google maps style 2D camera / zoom to mouse cursor

I am trying to implement a 2D camera in OpenGL that behaves like the Google maps camera. Specifically the "zoom to mouse point" functionality.
So far I have been able to implement pan and zoom OK, but only if the zoom is locked to the centre of the window/widget. If I try to zoom on the mouse location, the view seems to "jump", and after the zoom level increases the item I zoomed in on is no longer under the mouse cursor.
My camera class is below - quite a lot of code but I couldn't make it any smaller sorry!
I call Apply() at the start of each frame, I call SetX/YPos when the scene is panned, and finally I call SetScale with the previous scale +/- 0.1f and the mouse position when the mouse wheel is scrolled.
camera.h
class Camera
{
public:
    Camera();

    void Apply();
    void SetXPos(float xpos);
    void SetYPos(float ypos);
    void SetScale(float scaleFactor, float mx, float my);

    float XPos() const { return m_XPos; }
    float YPos() const { return m_YPos; }
    float Scale() const { return m_ScaleFactor; }

    void SetWindowSize(int w, int h);
    void DrawTestItems();

private:
    void init_matrix();

    float m_XPos;
    float m_YPos;
    float m_ScaleFactor;
    float m_Width;
    float m_Height;
    float m_ZoomX;
    float m_ZoomY;
};
camera.cpp
Camera::Camera()
    : m_XPos(0.0f),
      m_YPos(0.0f),
      m_ScaleFactor(1.0f),
      m_ZoomX(0.0f),
      m_ZoomY(0.0f),
      m_Width(0.0f),
      m_Height(0.0f)
{
}

// Called when window is created and when window is resized
void Camera::SetWindowSize(int w, int h)
{
    m_Width = (float)w;
    m_Height = (float)h;
}

void Camera::init_matrix()
{
    glViewport(0, 0, m_Width, m_Height);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();

    float new_W = m_Width * m_ScaleFactor;
    float new_H = m_Height * m_ScaleFactor;

    // Point to zoom on
    float new_x = m_ZoomX;
    float new_y = m_ZoomY;

    glOrtho( -new_W/2+new_x,
              new_W/2+new_x,
              new_H/2+new_y,
             -new_H/2+new_y,
             -1, 1 );

    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

void Camera::Apply()
{
    // Zoom
    init_matrix();

    // Pan
    glTranslatef( m_XPos, m_YPos, 1.0f );

    DrawTestItems();
}

void Camera::SetXPos(float xpos)
{
    m_XPos = xpos;
}

void Camera::SetYPos(float ypos)
{
    m_YPos = ypos;
}

// mx,my = window coords of mouse pos when wheel was scrolled
// scale factor goes up or down by 0.1f
void Camera::SetScale(float scaleFactor, float mx, float my)
{
    m_ZoomX = (float)mx;
    m_ZoomY = (float)my;
    m_ScaleFactor = scaleFactor;
}

void Camera::DrawTestItems()
{
}
Update: I seem to have noticed 2 issues:
1. The mouse position in SetScale is incorrect - I don't know why.
2. No matter what I try, glOrtho causes the centre of the screen to be the zoom point; I confirmed this by setting the zoom point manually/hard-coding it. In Google Maps the view doesn't "stick" to the centre like this.
Update again:
I'm also using Qt, if that makes any difference. I just have a basic QGLWidget and I use the mouse wheel event to perform the zoom: I take the delta of the wheel event and then either add or subtract 0.1f to the scale, passing in the mouse position from the wheel event.
1. Get the world-space coordinates of the mouse cursor using the current zoom factor and model/proj/view matrices.
2. Adjust the zoom factor.
3. Get the world-space mouse coordinates again using the new zoom factor.
4. Shift the camera position by the difference in world-space mouse coordinates.
5. Redraw the scene using the new camera position and zoom factor.
Something like this (in the wheel() callback):
#include <GL/freeglut.h>

#include <iostream>
using namespace std;

#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>

glm::dvec3 Unproject( const glm::dvec3& win )
{
    glm::ivec4 view;
    glm::dmat4 proj, model;
    glGetDoublev( GL_MODELVIEW_MATRIX, &model[0][0] );
    glGetDoublev( GL_PROJECTION_MATRIX, &proj[0][0] );
    glGetIntegerv( GL_VIEWPORT, &view[0] );
    glm::dvec3 world = glm::unProject( win, model, proj, view );
    return world;
}

// unprojects the given window point
// and finds the ray intersection with the Z=0 plane
glm::dvec2 PlaneUnproject( const glm::dvec2& win )
{
    glm::dvec3 world1 = Unproject( glm::dvec3( win, 0.01 ) );
    glm::dvec3 world2 = Unproject( glm::dvec3( win, 0.99 ) );

    // u is a value such that:
    // 0 = world1.z + u * ( world2.z - world1.z )
    double u = -world1.z / ( world2.z - world1.z );
    // clamp u to reasonable values
    if( u < 0 ) u = 0;
    if( u > 1 ) u = 1;

    return glm::dvec2( world1 + u * ( world2 - world1 ) );
}

// pixels per unit
const double ppu = 1.0;
glm::dvec2 center( 0 );
double scale = 1.0;
void ApplyCamera()
{
    glMatrixMode( GL_PROJECTION );
    glLoadIdentity();
    const double w = glutGet( GLUT_WINDOW_WIDTH ) / ppu;
    const double h = glutGet( GLUT_WINDOW_HEIGHT ) / ppu;
    glOrtho( -w/2, w/2, -h/2, h/2, -1, 1 );

    glMatrixMode( GL_MODELVIEW );
    glLoadIdentity();
    glScaled( scale, scale, 1.0 );
    glTranslated( -center[0], -center[1], 0 );
}

glm::dvec2 mPos;
glm::dvec2 centerStart( 0 );
int btn = -1;
void mouse( int button, int state, int x, int y )
{
    ApplyCamera();

    y = glutGet( GLUT_WINDOW_HEIGHT ) - y;
    mPos = glm::ivec2( x, y );

    btn = button;

    if( GLUT_LEFT_BUTTON == btn && GLUT_DOWN == state )
    {
        centerStart = PlaneUnproject( glm::dvec2( x, y ) );
    }
    if( GLUT_LEFT_BUTTON == btn && GLUT_UP == state )
    {
        btn = -1;
    }

    glutPostRedisplay();
}

void motion( int x, int y )
{
    y = glutGet( GLUT_WINDOW_HEIGHT ) - y;
    mPos = glm::ivec2( x, y );

    if( GLUT_LEFT_BUTTON == btn )
    {
        ApplyCamera();
        glm::dvec2 cur = PlaneUnproject( glm::dvec2( x, y ) );
        center += ( centerStart - cur );
    }

    glutPostRedisplay();
}

void passiveMotion( int x, int y )
{
    y = glutGet( GLUT_WINDOW_HEIGHT ) - y;
    mPos = glm::ivec2( x, y );
    glutPostRedisplay();
}

void wheel( int wheel, int direction, int x, int y )
{
    y = glutGet( GLUT_WINDOW_HEIGHT ) - y;
    mPos = glm::ivec2( x, y );

    ApplyCamera();
    glm::dvec2 beforeZoom = PlaneUnproject( glm::dvec2( x, y ) );

    const double scaleFactor = 0.90;
    if( direction == -1 ) scale *= scaleFactor;
    if( direction ==  1 ) scale /= scaleFactor;

    ApplyCamera();
    glm::dvec2 afterZoom = PlaneUnproject( glm::dvec2( x, y ) );

    center += ( beforeZoom - afterZoom );

    glutPostRedisplay();
}

void display()
{
    glClearColor( 0, 0, 0, 1 );
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );

    ApplyCamera();

    glm::dvec2 cur = PlaneUnproject( mPos );
    cout << cur.x << " " << cur.y << " " << scale << endl;

    glPushMatrix();
    glScalef( 50, 50, 1 );
    glBegin( GL_QUADS );
    glColor3ub( 255, 255, 255 );
    glVertex2i( -1, -1 );
    glVertex2i(  1, -1 );
    glVertex2i(  1,  1 );
    glVertex2i( -1,  1 );
    glEnd();
    glPopMatrix();

    glutSwapBuffers();
}

int main( int argc, char **argv )
{
    glutInit( &argc, argv );
    glutInitDisplayMode( GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE );
    glutInitWindowSize( 600, 600 );
    glutCreateWindow( "GLUT" );
    glutMouseFunc( mouse );
    glutMotionFunc( motion );
    glutMouseWheelFunc( wheel );
    glutDisplayFunc( display );
    glutPassiveMotionFunc( passiveMotion );
    glutMainLoop();
    return 0;
}
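The same before/after idea can also be folded directly into the question's Camera class. The sketch below is untested and assumes a hypothetical ScreenToWorld() helper (not part of the original class) that inverts the glOrtho/glTranslatef mapping set up in init_matrix() and Apply(), with the mouse y measured top-down as Qt reports it; m_ZoomX/m_ZoomY are left unchanged:

// Hypothetical helper: window-space mouse position -> world-space position,
// inverting the glOrtho(...) call in init_matrix() and the pan in Apply().
void Camera::ScreenToWorld(float mx, float my, float& wx, float& wy) const
{
    // One pixel covers m_ScaleFactor world units; the window centre maps to
    // (m_ZoomX - m_XPos, m_ZoomY - m_YPos).
    wx = (mx - m_Width  * 0.5f) * m_ScaleFactor + m_ZoomX - m_XPos;
    wy = (my - m_Height * 0.5f) * m_ScaleFactor + m_ZoomY - m_YPos;
}

void Camera::SetScale(float scaleFactor, float mx, float my)
{
    float beforeX, beforeY, afterX, afterY;
    ScreenToWorld(mx, my, beforeX, beforeY);   // world point under the cursor now
    m_ScaleFactor = scaleFactor;               // apply the new zoom
    ScreenToWorld(mx, my, afterX, afterY);     // same pixel after the zoom

    // Pan so the point that was under the cursor stays under the cursor.
    m_XPos += (afterX - beforeX);
    m_YPos += (afterY - beforeY);
}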

c++ DirectX 11 - 3rd person camera

I'm trying to set up a third-person camera, but I'm lost with the rotations. My rotation works for the Y axis, but the other axes behave strangely. This is my code:
XMMATRIX Camera2::Render()
{
    return XMMatrixLookAtLH( XMLoadFloat3( &m_vPosition ), XMLoadFloat3( &m_vTargetPos ), XMLoadFloat3( &( XMFLOAT3( 0.0f, 1.0f, 0.0f ) ) ) );
}

void Camera2::Rotate( float fAngle, int nAxe )
{
    float fToRad = 0.0174532925f;
    fAngle *= fToRad;

    if( nAxe == 0 )
    {
        XMFLOAT3 vPosition = m_vPosition;
        m_vPosition.y = vPosition.y * cos( fAngle ) - vPosition.z * sin( fAngle );
        m_vPosition.z = vPosition.y * sin( fAngle ) + vPosition.z * cos( fAngle );
    }
    else if( nAxe == 1 )
    {
        XMFLOAT3 vPosition = m_vPosition;
        m_vPosition.z = vPosition.z * cos( fAngle ) - vPosition.x * sin( fAngle );
        m_vPosition.x = vPosition.z * sin( fAngle ) + vPosition.x * cos( fAngle );
    }
    else if( nAxe == 2 )
    {
        XMFLOAT3 vPosition = m_vPosition;
        m_vPosition.x = vPosition.x * cos( fAngle ) - vPosition.y * sin( fAngle );
        m_vPosition.y = vPosition.x * sin( fAngle ) + vPosition.y * cos( fAngle );
    }
}
And the code calling the camera functions ( x = 0, y = 1, z = 2 ) :
if( event.IsPushedKey( VK_F1 ) )
    m_pCamera->Rotate( -3.0f, 0 );
else if( event.IsPushedKey( VK_F2 ) )
    m_pCamera->Rotate( -3.0f, 1 );
else if( event.IsPushedKey( VK_F3 ) )
    m_pCamera->Rotate( -3.0f, 2 );
else if( event.IsPushedKey( VK_F4 ) )
    m_pCamera->Rotate( 3.0f, 0 );
else if( event.IsPushedKey( VK_F5 ) )
    m_pCamera->Rotate( 3.0f, 1 );
else if( event.IsPushedKey( VK_F6 ) )
    m_pCamera->Rotate( 3.0f, 2 );
Another question: when I start with XMFLOAT3( 0.0f, 0.0f, 0.0f ) as the lookAt variable and have an x and y position equal to 0, nothing is drawn. I need to set one of the lookAt axes to 1.0f to see something. Why?
Your rotation matrices were wrong for the rotations around the Y-axis and Z-axis. For reference, the standard rotation matrices (column-vector convention) are:

Rotation around the X-axis by theta (in radians):
    | 1       0            0      |
    | 0   cos(theta)  -sin(theta) |
    | 0   sin(theta)   cos(theta) |

Rotation around the Y-axis by theta (in radians):
    |  cos(theta)  0   sin(theta) |
    |      0       1       0      |
    | -sin(theta)  0   cos(theta) |

Rotation around the Z-axis by theta (in radians):
    | cos(theta)  -sin(theta)  0 |
    | sin(theta)   cos(theta)  0 |
    |     0            0       1 |
As for the second question: imagine the real world. What happens when you try to focus on your own eye? It's not possible. In the same way, if the lookAt point coincides with the camera position (or the view direction ends up parallel to the up vector), XMMatrixLookAtLH cannot build a valid view matrix, so nothing is drawn until you move the lookAt point away.
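If the goal is a free third-person orbit rather than per-axis trigonometry, one common alternative is to rotate the camera's offset from the target with DirectXMath. A rough sketch along those lines, reusing the member names from the question (the Orbit method itself is not part of the original class, and pitching about the world X axis is a simplification):

// Sketch: orbit the camera position around m_vTargetPos by yaw/pitch (radians),
// using DirectXMath instead of hand-written component rotations.
void Camera2::Orbit( float fYaw, float fPitch )
{
    using namespace DirectX;

    XMVECTOR target = XMLoadFloat3( &m_vTargetPos );
    XMVECTOR offset = XMVectorSubtract( XMLoadFloat3( &m_vPosition ), target );

    // Build one rotation for both angles and apply it to the offset vector.
    XMMATRIX rot = XMMatrixRotationRollPitchYaw( fPitch, fYaw, 0.0f );
    offset = XMVector3Transform( offset, rot );

    XMStoreFloat3( &m_vPosition, XMVectorAdd( target, offset ) );
}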

Get world coordinates from D3DXIntersectTri

I have a square area over which I have to determine where the mouse is pointing.
With D3DXIntersectTri I can tell IF the mouse is pointing at it, but I have trouble calculating the x, y, z coordinates of the hit.
The drawing from vertex buffer, which initialized with the vertices array:
vertices[0].position = D3DXVECTOR3(-10, 0, -10);
vertices[1].position = D3DXVECTOR3(-10, 0, 10);
vertices[2].position = D3DXVECTOR3( 10, 0, -10);
vertices[3].position = D3DXVECTOR3( 10, 0, -10);
vertices[4].position = D3DXVECTOR3(-10, 0, 10);
vertices[5].position = D3DXVECTOR3( 10, 0, 10);
I have this method so far, but it is not giving me the right coordinates (it works only on a small part of the area, near two of the edges, and is more or less accurate inside):
BOOL Area::getcoord( Ray& ray, D3DXVECTOR3& coord )
{
    D3DXVECTOR3 rayOrigin, rayDirection;
    rayDirection = ray.direction;
    rayOrigin = ray.origin;

    float d;
    D3DXMATRIX matInverse;
    D3DXMatrixInverse(&matInverse, NULL, &matWorld);

    // Transform ray origin and direction by inv matrix
    D3DXVECTOR3 rayObjOrigin, rayObjDirection;
    D3DXVec3TransformCoord(&rayOrigin, &rayOrigin, &matInverse);
    D3DXVec3TransformNormal(&rayDirection, &rayDirection, &matInverse);
    D3DXVec3Normalize(&rayDirection, &rayDirection);

    float u, v;
    BOOL isHit1, isHit2;
    D3DXVECTOR3 p1, p2, p3;

    p1 = vertices[3].position;
    p2 = vertices[4].position;
    p3 = vertices[5].position;
    isHit1 = D3DXIntersectTri(&p1, &p2, &p3, &rayOrigin, &rayDirection, &u, &v, &d);
    isHit2 = FALSE;

    if(!isHit1)
    {
        p1 = vertices[0].position;
        p2 = vertices[1].position;
        p3 = vertices[2].position;
        isHit2 = D3DXIntersectTri(&p1, &p2, &p3, &rayOrigin, &rayDirection, &u, &v, &d);
    }

    if(isHit1)
    {
        coord.x = 1 * ((1-u-v)*p3.x + u*p3.y + v*p3.z);
        coord.y = 0.2f;
        coord.z = -1 * ((1-u-v)*p1.x + u*p1.y + v*p1.z);
        D3DXVec3TransformCoord(&coord, &coord, &matInverse);
    }
    if(isHit2)
    {
        coord.x = -1 * ((1-u-v)*p3.x + u*p3.y + v*p3.z);
        coord.y = 0.2f;
        coord.z = 1 * ((1-u-v)*p1.x + u*p1.y + v*p1.z);
        D3DXVec3TransformCoord(&coord, &coord, &matWorld);
    }

    return isHit1 || isHit2;
}
Barycentric coordinates don't work the way you used them. u and v define the weights of the source vertices. So if you want to calculate the hit point, you will have to compute
coord = u * p1 + v * p2 + (1 - u - v) * p3
Alternatively, you can use the d ray parameter:
coord = rayOrigin + d * rayDirection
Both ways should result in the same coordinate.
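As a concrete sketch of the second formula inside the question's getcoord(), assuming the ray has already been transformed into object space as in the original code (the result is then moved back to world space with matWorld):

if (isHit1 || isHit2)
{
    // Hit point in object space: walk d units along the normalized ray.
    coord = rayOrigin + d * rayDirection;

    // The ray was transformed by the inverse world matrix, so transform the
    // hit point back into world space.
    D3DXVec3TransformCoord(&coord, &coord, &matWorld);
}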