Hello everyone, I am messing around with some DLL injection. I downloaded a simple Direct3D 9 sample application (it renders only a cube) that I want to manipulate.
I have already managed to hook the application's EndScene and Reset functions, and now I simply want to add more light to the currently drawn frame.
Here is my EndScene function:
HRESULT WINAPI myEndScene(LPDIRECT3DDEVICE9 pDevice) {
    lightHackTest2(pDevice);
    auto ret = origEndScene(pDevice);
    placeHooks();
    return ret;
}
and here are my lightHackTest2 and lightHackTest functions:
void lightHackTest2(LPDIRECT3DDEVICE9 pDevice) {
    pDevice->SetRenderState(D3DRS_AMBIENT, D3DCOLOR_XRGB(100, 100, 100));
}

void lightHackTest(LPDIRECT3DDEVICE9 pDevice) {
    D3DLIGHT9 light;
    ZeroMemory(&light, sizeof(light));
    light.Type = D3DLIGHT_DIRECTIONAL;
    light.Diffuse.r = 0.5f;
    light.Diffuse.g = 0.5f;
    light.Diffuse.b = 0.5f;
    light.Diffuse.a = 1.0f;
    light.Direction.x = -1.0f;
    light.Direction.y = -0.5f;
    light.Direction.z = -1.0f;
    pDevice->SetLight(0, &light);
    pDevice->LightEnable(0, TRUE);
}
These functions do get called (I checked with some MessageBoxes), but everything in the scene stays the same.
Am I applying the light wrong?
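For reference, here is a minimal sketch (an assumption on my part, not verified against the sample) of the render states that usually have to be set before D3D9's fixed-function lighting reacts to SetLight/LightEnable at all; if the sample draws with shaders instead of the fixed-function pipeline, none of these states will change the output:

// Hypothetical helper, named lightHackTest3 only for illustration.
void lightHackTest3(LPDIRECT3DDEVICE9 pDevice) {
    // Fixed-function lighting must be switched on, otherwise SetLight/LightEnable are ignored
    pDevice->SetRenderState(D3DRS_LIGHTING, TRUE);
    pDevice->SetRenderState(D3DRS_AMBIENT, D3DCOLOR_XRGB(100, 100, 100));
    // Directional lights also need per-vertex normals and a material (SetMaterial) on the geometry
    pDevice->SetRenderState(D3DRS_NORMALIZENORMALS, TRUE);
}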
I am using D2D with D3D11. I have some code that uses GetCursorPos() from the Windows API, converts the result to client coordinates, and then draws a small circle at that position using D2D FillEllipse(). The screen-to-client conversion works perfectly, but for some reason D2D draws the circle a small distance from the expected location (tens of pixels), as if the coordinate had been scaled by a small factor, so the error grows the further the circle is drawn from (0, 0).
I noticed that changing the DPI in the D2D1_RENDER_TARGET_PROPERTIES affects this 'scaling', so I suspect the problem has something to do with DPI. This is the code for creating the D2D render target from the DXGI surface I obtained from the swap chain in my D3D11 code.
// Create render target
float dpiX, dpiY;
this->factory->GetDesktopDpi(&dpiX, &dpiY);
D2D1_RENDER_TARGET_PROPERTIES rtDesc = D2D1::RenderTargetProperties(
    D2D1_RENDER_TARGET_TYPE_HARDWARE,
    D2D1::PixelFormat(DXGI_FORMAT_UNKNOWN, D2D1_ALPHA_MODE_PREMULTIPLIED),
    dpiX,
    dpiY
);
AssertHResult(this->factory->CreateDxgiSurfaceRenderTarget(
    surface.Get(),
    &rtDesc,
    &this->renderTarget
), "Failed to create D2D render target");
Here, dpiX and dpiY come out as 96, which I notice is also the value that GetDpiForWindow() from the Windows API returns when the process is not DPI aware.
I want to know how I can fix my code so that it draws the circle at the position given by GetCursorPos().
More relevant code:
Driver code
Vector3f cursPos = input.GetCursorPos();
DrawCircle(Colour::Green, cursPos.x, cursPos.y, 3/*radius*/);
Input
POINT pt{};
::GetCursorPos(&pt);
// Convert from screen pixels to client pixels
return ConvertPixelSpace(this->hWnd, (float)pt.x, (float)pt.y, PixelSpace::Screen, PixelSpace::Client);
Direct2D
void DrawCircle(const Colour& c, float centreX, float centreY, float radius, PixelSpace ps)
{
    Vector3f centre = ConvertPixelSpace(this->gfx.hWnd, centreX, centreY, ps, PixelSpace::Client);
    centreX = centre.x;
    centreY = centre.y;

    D2D1_ELLIPSE el{};
    el.point.x = centreX;
    el.point.y = centreY;
    el.radiusX = radius;
    el.radiusY = radius;

    auto brush = this->CreateBrush(c);
    this->renderTarget->FillEllipse(
        &el,
        brush.Get()
    );
}
PixelSpace Conversion
Vector3f ConvertPixelSpace(HWND hWnd, float x, float y, PixelSpace curSpace, PixelSpace newSpace)
{
    RECT rc = GetClientRectOfWindow(hWnd);

    struct
    {
        float top, left, width, height;
    } rectf;
    rectf.top = static_cast<float>(rc.top);
    rectf.left = static_cast<float>(rc.left);
    rectf.width = static_cast<float>(rc.right - rc.left);
    rectf.height = static_cast<float>(rc.bottom - rc.top);

    // Convert to client space
    if (curSpace == PixelSpace::Screen)
    {
        x -= rectf.left;
        y -= rectf.top;
    }

    // Convert to new space
    if (newSpace == PixelSpace::Screen)
    {
        x += rectf.left;
        y += rectf.top;
    }

    return Vector3f(x, y);
}
RECT GetClientRectOfWindow(HWND hWnd)
{
    RECT rc;
    ::GetClientRect(hWnd, &rc);
    // Pretty sure these are valid casts.
    // rc.top is stored directly after rc.left and this forms a POINT struct
    ClientToScreen(hWnd, reinterpret_cast<POINT*>(&rc.left));
    ClientToScreen(hWnd, reinterpret_cast<POINT*>(&rc.right));
    return rc;
}
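As an aside, an equivalent and slightly more compact way to get the client rectangle in screen coordinates is MapWindowPoints, which maps both corners in one call. This is just an alternative sketch, not part of the fix:

// Equivalent to GetClientRectOfWindow above: the client rect in screen coordinates.
RECT GetClientRectOfWindowAlt(HWND hWnd)
{
    RECT rc;
    ::GetClientRect(hWnd, &rc);
    // A RECT is laid out as two POINTs (left/top and right/bottom),
    // so both corners can be mapped in a single call.
    ::MapWindowPoints(hWnd, HWND_DESKTOP, reinterpret_cast<POINT*>(&rc), 2);
    return rc;
}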
The problem was that I was creating the D3D11 swap chain with the window area instead of the client area.
RECT rect{};
GetWindowRect(hWnd, &rect); // !!! This should be GetClientRect()
this->width = rect.right - rect.left;
this->height = rect.bottom - rect.top;
DXGI_SWAP_CHAIN_DESC scDesc{};
scDesc.BufferDesc.Width = width;
scDesc.BufferDesc.Height = height;
//...
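With that change, the sizing code would look like this (a sketch of the corrected version, using the same member names as above):

RECT rect{};
GetClientRect(hWnd, &rect);             // client area only; left and top are always 0
this->width = rect.right - rect.left;
this->height = rect.bottom - rect.top;
DXGI_SWAP_CHAIN_DESC scDesc{};
scDesc.BufferDesc.Width = width;        // buffers now match what D2D/D3D11 actually presents into
scDesc.BufferDesc.Height = height;
//...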
EDIT: SOLVED
The problem was that I was using the render-state functions needed for alpha blending outside of the Sprite->Begin() and Sprite->End() code block.
I am creating my own 2D engine on top of DirectX 9.0. I use sprites with corresponding sprite sheets to draw them. The problem is that if I set my sprite flags to D3DXSPRITE_SORT_TEXTURE, I can see the texture without any problems (including transformation matrices), but if I set them to D3DXSPRITE_ALPHABLEND, the sprite won't display at all. I've tried several things: SetRenderState, changing the image format from .png to .tga, adding an alpha channel to the image with a black background, using another image from a 2D blending example, changing the D3DFMT_ parameter of my D3DManager, etc.
I tried searching for an answer here but didn't find anything related to my question.
Here's some of my code which might be of importance;
D3DManager.cpp
parameters.BackBufferWidth = w; //Change Direct3D renderer size
parameters.BackBufferHeight = h;
parameters.BackBufferFormat = D3DFMT_UNKNOWN; //Colors
parameters.BackBufferCount = 1; //The amount of buffers to use
parameters.MultiSampleType = D3DMULTISAMPLE_NONE; //Anti-aliasing quality
parameters.MultiSampleQuality = 0;
parameters.SwapEffect = D3DSWAPEFFECT_DISCARD;
parameters.hDeviceWindow = window; //The window to tie the buffer to
parameters.Windowed = true; //Window mode, true or false
parameters.EnableAutoDepthStencil = NULL;
parameters.Flags = NULL; //Advanced flags
parameters.FullScreen_RefreshRateInHz = 0; //Fullscreen refresh rate, leave at 0 for auto and no risk
parameters.PresentationInterval = D3DPRESENT_INTERVAL_ONE; //How often to redraw
Sprite.cpp
void Sprite::draw(){
    D3DXVECTOR2 center2D = D3DXVECTOR2(center.x, center.y);
    D3DXMatrixTransformation2D(&matrix, &center2D, NULL, &scale, &center2D, angle, new D3DXVECTOR2(position.x, position.y));
    sprite->SetTransform(&matrix);
    sprite->Begin(D3DXSPRITE_ALPHABLEND);
    if(!extended){
        sprite->Draw(texture, NULL, NULL, &position, 0xFFFFFF);
    }
    else{
        doAnimation();
        sprite->Draw(texture, &src, &center, new D3DXVECTOR3(0,0,0), color);
    }
    sprite->End();
}
Main.cpp
//Clear the scene for drawing
void renderScene(){
    d3dManager->getDevice().Clear(0, NULL, D3DCLEAR_TARGET, 0x161616, 1.0f, 0); //Clear entire backbuffer
    d3dManager->getDevice().BeginScene(); //Prepare scene for drawing
    render(); //Render everything
    d3dManager->getDevice().EndScene(); //Close off
    d3dManager->getDevice().Present(NULL, NULL, NULL, NULL); //Present everything on-screen
}
//Render everything
void render(){
    snake->draw();
}
I've got no clue at all. Any help would be appreciated.
The problem was that I was using the render-state functions needed for alpha blending outside of the Sprite->Begin() and Sprite->End() code block.
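As a sketch of what moving those calls inside the block might look like (assuming the device pointer is reachable from Sprite::draw(); the exact render states shown are illustrative, not taken from the question):

sprite->Begin(D3DXSPRITE_ALPHABLEND);
// Render states set here, inside Begin()/End(), take effect for the sprite draw;
// set outside the block, they are overwritten when Begin() configures the device.
device->SetRenderState(D3DRS_ALPHABLENDENABLE, TRUE);
device->SetRenderState(D3DRS_SRCBLEND, D3DBLEND_SRCALPHA);
device->SetRenderState(D3DRS_DESTBLEND, D3DBLEND_INVSRCALPHA);
sprite->Draw(texture, &src, &center, new D3DXVECTOR3(0, 0, 0), color);
sprite->End();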
I am currently trying to make a program that draws a polygon on the desktop for a simple animation. The problem I am running into is that the animation gets an "onion" effect because the desktop isn't refreshing. I have searched for a method to refresh the desktop, but because it's an animation, none of the solutions refresh it fast enough. Below is an example of my code:
#include <iostream>
#include <Windows.h>
#include <math.h>
#include <Shlobj.h>
int main() {
    //start ambrose
    POINT amby[5];
    POINT pos;

    /* hide console window */
    ShowWindow(FindWindowA("ConsoleWindowClass", NULL), false);

    /* Calling GetDC with argument 0 retrieves the desktop's DC */
    HDC hDC_Desktop = GetDC(0);

    //This is just an example of what I am doing
    for (int i = 0; i < 10000; i++) {
        pos.x = 600 + sin(double(i) / 50) * 200;
        pos.y = 500 + cos(double(i) / 50) * 200;
        amby[0].x = -10 + pos.x;
        amby[0].y = -10 + pos.y;
        amby[1].x = -50 + pos.x;
        amby[1].y = -50 + pos.y;
        amby[2].x = 50 + pos.x;
        amby[2].y = -50 + pos.y;
        Polygon(hDC_Desktop, amby, 3);
        Sleep(10);
    }

    //The method I was trying before that didn't work VVVVV
    //LPITEMIDLIST pidl;
    //SHGetSpecialFolderLocation(NULL,CSIDL_DESKTOP,&pidl);
    //SHChangeNotify(SHCNE_ASSOCCHANGED,SHCNF_IDLIST,pidl,0);
    return 0;
}
Thanks.
Edit
I have tried using InvalidateRect like this:
...
for (int i = 0; i < 10000; i++) {
    pos.x = 600 + sin(double(i) / 50) * 200;
    pos.y = 500 + cos(double(i) / 50) * 200;
    amby[0].x = -10 + pos.x;
    amby[0].y = -10 + pos.y;
    amby[1].x = -50 + pos.x;
    amby[1].y = -50 + pos.y;
    amby[2].x = 50 + pos.x;
    amby[2].y = -50 + pos.y;
    Polygon(hDC_Desktop, amby, 3);
    InvalidateRect(GetDesktopWindow(), NULL, true);
    Sleep(10);
}
...
I am wondering if there is any way to send WM_ERASEBKGND or WM_DISPLAYCHANGE to force a redraw. Does anyone know if there is a way to do this?
I am not sure what you are trying to achieve, so let me just address the onion effect. A quick and dirty way to erase what was drawn in the previous iteration is to draw in XOR mode: drawing the same shape a second time in XOR mode restores the original pixels. That approach has a few downsides, though; it flickers, and the resulting colors are essentially arbitrary. A proper solution that addresses both downsides is to do all the drawing into a memory DC and BitBlt the finished frame to the screen.
Code for the quick and dirty solution would be -
SetROP2(hDC_Desktop, R2_XORPEN);

//This is just an example of what I am doing
for (int i = 0; i < 100; i++)
{
    if (i != 0)
    {
        // Redraw the previous frame's polygon in XOR mode to erase it
        pos.x = 600 + sin(double(i - 1) / 50) * 200;
        pos.y = 500 + cos(double(i - 1) / 50) * 200;
        amby[0].x = -10 + pos.x;
        amby[0].y = -10 + pos.y;
        amby[1].x = -50 + pos.x;
        amby[1].y = -50 + pos.y;
        amby[2].x = 50 + pos.x;
        amby[2].y = -50 + pos.y;
        Polygon(hDC_Desktop, amby, 3);
    }

    // Draw the current frame's polygon
    pos.x = 600 + sin(double(i) / 50) * 200;
    pos.y = 500 + cos(double(i) / 50) * 200;
    amby[0].x = -10 + pos.x;
    amby[0].y = -10 + pos.y;
    amby[1].x = -50 + pos.x;
    amby[1].y = -50 + pos.y;
    amby[2].x = 50 + pos.x;
    amby[2].y = -50 + pos.y;
    Polygon(hDC_Desktop, amby, 3);
    Sleep(10);
}
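For the "proper" approach mentioned above, here is a minimal sketch under the same loop structure: draw each frame into an off-screen memory DC and BitBlt the finished frame to the desktop DC. The SCREEN_W/SCREEN_H dimensions are placeholder assumptions for illustration.

// Sketch: double-buffer the animation in a memory DC, then blit it.
const int SCREEN_W = 1200, SCREEN_H = 800;   // placeholder dimensions

HDC hdcMem    = CreateCompatibleDC(hDC_Desktop);
HBITMAP hbm   = CreateCompatibleBitmap(hDC_Desktop, SCREEN_W, SCREEN_H);
HGDIOBJ oldBm = SelectObject(hdcMem, hbm);

for (int i = 0; i < 100; i++)
{
    // Start each frame from a clean background
    RECT rc = { 0, 0, SCREEN_W, SCREEN_H };
    FillRect(hdcMem, &rc, (HBRUSH)GetStockObject(WHITE_BRUSH));

    pos.x = 600 + sin(double(i) / 50) * 200;
    pos.y = 500 + cos(double(i) / 50) * 200;
    amby[0].x = -10 + pos.x;  amby[0].y = -10 + pos.y;
    amby[1].x = -50 + pos.x;  amby[1].y = -50 + pos.y;
    amby[2].x =  50 + pos.x;  amby[2].y = -50 + pos.y;
    Polygon(hdcMem, amby, 3);

    // Copy the finished frame to the screen in one operation
    BitBlt(hDC_Desktop, 0, 0, SCREEN_W, SCREEN_H, hdcMem, 0, 0, SRCCOPY);
    Sleep(10);
}

SelectObject(hdcMem, oldBm);
DeleteObject(hbm);
DeleteDC(hdcMem);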
There's an easy solution, and that's to not actually draw on the desktop. Instead, create a transparent full-screen window. Since it's transparent, any pixel that you don't draw will show the desktop underneath. Hence, only your polygon pixels will hide the underlying desktop.
As a result, the desktop window never needs to be invalidated or repainted etc.
Why don't you use a transparent window?
class COverlayWnd : public CWnd
{
    DECLARE_DYNAMIC(COverlayWnd)
public:
    COverlayWnd();
    virtual ~COverlayWnd();
protected:
    afx_msg void OnPaint();
    afx_msg int OnCreate(LPCREATESTRUCT lpCreateStruct);
    DECLARE_MESSAGE_MAP()
};
// OverlayWnd.cpp : implementation file
//
The implementation. Just move the window if you want animations to run all over the desktop.
#include "stdafx.h"
// COverlayWnd
IMPLEMENT_DYNAMIC(COverlayWnd, CWnd)
COverlayWnd::COverlayWnd()
{
}
COverlayWnd::~COverlayWnd()
{
}
BEGIN_MESSAGE_MAP(COverlayWnd, CWnd)
ON_WM_PAINT()
ON_WM_CREATE()
END_MESSAGE_MAP()
void COverlayWnd::OnPaint()
{
    CPaintDC dc(this);
    CRect rect;
    GetClientRect( &rect );
    dc.FillSolidRect(&rect, RGB(1,1,1));
    //paint other stuff that don't have RGB(1,1,1)
}

int COverlayWnd::OnCreate(LPCREATESTRUCT lpCreateStruct)
{
    if (CWnd::OnCreate(lpCreateStruct) == -1)
        return -1;

    BOOL bRet = 0;
    bRet = ModifyStyleEx(0, WS_EX_LAYERED | WS_EX_TRANSPARENT);
    bRet = ModifyStyle(DS_SETFONT | DS_MODALFRAME | DS_FIXEDSYS | WS_POPUP | WS_CAPTION | WS_SYSMENU, 0);
    bRet = ModifyStyle(WS_POPUP, 0);
    bRet = SetLayeredWindowAttributes(RGB(1,1,1), 0, LWA_COLORKEY);
    //the RGB(1,1,1) is the transparent color
    ASSERT(bRet);
    //this->EnableWindow(FALSE);
    return 0;
}
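For completeness, a hedged sketch of how such an overlay might be created to cover the whole desktop; the class registration, styles, and size here are illustrative assumptions rather than part of the answer above:

// Create the overlay as a borderless, layered, click-through popup covering the primary monitor.
COverlayWnd* overlay = new COverlayWnd();
CRect screen(0, 0, GetSystemMetrics(SM_CXSCREEN), GetSystemMetrics(SM_CYSCREEN));
overlay->CreateEx(WS_EX_LAYERED | WS_EX_TRANSPARENT | WS_EX_TOPMOST,
                  AfxRegisterWndClass(0),
                  _T("Overlay"),
                  WS_POPUP | WS_VISIBLE,
                  screen,
                  NULL,   // no parent window
                  0);     // no control ID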
I have a problem with a revolute joint. When I make a line of little boxes connected with revolute joints, I notice strange behavior in my first box: it separates from the rest of the boxes.
You can see it here:
YouTube
You can compile the code below and you'll see what I'm talking about…
HelloWorldScene.h
// When you import this file, you import all the cocos2d classes
#import "cocos2d.h"
#import "Box2D.h"
#import "GLES-Render.h"
// HelloWorld Layer
@interface HelloWorld : CCLayer
{
b2World* world;
GLESDebugDraw *m_debugDraw;
}
// returns a Scene that contains the HelloWorld as the only child
+(id) scene;
-(void) Test;
@end
HelloWorldScene.mm
// Import the interfaces
#import "HelloWorldScene.h"
//Pixel to metres ratio. Box2D uses metres as the unit for measurement.
//This ratio defines how many pixels correspond to 1 Box2D "metre"
//Box2D is optimized for objects of 1x1 metre therefore it makes sense
//to define the ratio so that your most common object type is 1x1 metre.
#define PTM_RATIO 32
// enums that will be used as tags
enum {
kTagTileMap = 1,
kTagBatchNode = 1,
kTagAnimation1 = 1,
};
// HelloWorld implementation
@implementation HelloWorld
+(id) scene
{
// 'scene' is an autorelease object.
CCScene *scene = [CCScene node];
// 'layer' is an autorelease object.
HelloWorld *layer = [HelloWorld node];
// add layer as a child to scene
[scene addChild: layer];
// return the scene
return scene;
}
// initialize your instance here
-(id) init
{
if( (self=[super init])) {
// enable touches
self.isTouchEnabled = YES;
// enable accelerometer
self.isAccelerometerEnabled = YES;
CGSize screenSize = [CCDirector sharedDirector].winSize;
CCLOG(@"Screen width %0.2f screen height %0.2f",screenSize.width,screenSize.height);
// Define the gravity vector.
b2Vec2 gravity;
gravity.Set(0.0f, -10.0f);
// Do we want to let bodies sleep?
// This will speed up the physics simulation
bool doSleep = true;
// Construct a world object, which will hold and simulate the rigid bodies.
world = new b2World(gravity, doSleep);
world->SetContinuousPhysics(true);
// Debug Draw functions
m_debugDraw = new GLESDebugDraw( PTM_RATIO );
world->SetDebugDraw(m_debugDraw);
uint32 flags = 0;
flags += b2DebugDraw::e_shapeBit;
flags += b2DebugDraw::e_jointBit;
// flags += b2DebugDraw::e_aabbBit;
// flags += b2DebugDraw::e_pairBit;
// flags += b2DebugDraw::e_centerOfMassBit;
m_debugDraw->SetFlags(flags);
// Define the ground body.
b2BodyDef groundBodyDef;
groundBodyDef.position.Set(0, 0); // bottom-left corner
// Call the body factory which allocates memory for the ground body
// from a pool and creates the ground box shape (also from a pool).
// The body is also added to the world.
b2Body* groundBody = world->CreateBody(&groundBodyDef);
// Define the ground box shape.
b2PolygonShape groundBox;
// bottom
groundBox.SetAsEdge(b2Vec2(0,0), b2Vec2(screenSize.width/PTM_RATIO,0));
groundBody->CreateFixture(&groundBox,0);
// top
groundBox.SetAsEdge(b2Vec2(0,screenSize.height/PTM_RATIO), b2Vec2(screenSize.width/PTM_RATIO,screenSize.height/PTM_RATIO));
groundBody->CreateFixture(&groundBox,0);
// left
groundBox.SetAsEdge(b2Vec2(0,screenSize.height/PTM_RATIO), b2Vec2(0,0));
groundBody->CreateFixture(&groundBox,0);
// right
groundBox.SetAsEdge(b2Vec2(screenSize.width/PTM_RATIO,screenSize.height/PTM_RATIO), b2Vec2(screenSize.width/PTM_RATIO,0));
groundBody->CreateFixture(&groundBox,0);
//Set up sprite
[self Test];
[self schedule: @selector(tick:)];
}
return self;
}
- (void) Test {
// Circle
b2Body *circle1;
b2BodyDef bd1;
bd1.position.Set(45.0f/PTM_RATIO, 180.0f/PTM_RATIO);
bd1.type = b2_kinematicBody;
bd1.fixedRotation = false;
bd1.allowSleep = false;
circle1 = world->CreateBody(&bd1);
b2CircleShape shapecircle1;
shapecircle1.m_radius = 0.5f;
b2FixtureDef fdcircle1;
fdcircle1.shape = &shapecircle1;
fdcircle1.density = 2.0f;
fdcircle1.friction = 2.0f;
circle1->CreateFixture(&fdcircle1);
// Boxes
b2PolygonShape shape;
shape.SetAsBox(6.0f/PTM_RATIO, 0.125f);
b2FixtureDef fd;
fd.shape = &shape;
fd.density = 20.0f;
fd.friction = 0.2f;
b2RevoluteJointDef jd;
jd.collideConnected = false;
const float32 y = 9.0f;
b2BodyDef bd;
bd.type = b2_dynamicBody;
bd.position.Set(15.0f/PTM_RATIO, y);
b2Body* prevBody = world->CreateBody(&bd);
prevBody->CreateFixture(&fd);
b2Vec2 anchor(float32(0), y);
for (int32 i = 1; i < 8; ++i)
{
b2BodyDef bd;
bd.type = b2_dynamicBody;
bd.position.Set((15.0f + (i*10))/PTM_RATIO, y);
b2Body* body = world->CreateBody(&bd);
body->CreateFixture(&fd);
b2Vec2 anchor(float32(i*10)/PTM_RATIO, y);
jd.Initialize(prevBody, body, anchor);
world->CreateJoint(&jd);
prevBody = body;
}
}
-(void) draw
{
// Default GL states: GL_TEXTURE_2D, GL_VERTEX_ARRAY, GL_COLOR_ARRAY, GL_TEXTURE_COORD_ARRAY
// Needed states: GL_VERTEX_ARRAY,
// Unneeded states: GL_TEXTURE_2D, GL_COLOR_ARRAY, GL_TEXTURE_COORD_ARRAY
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
world->DrawDebugData();
// restore default GL states
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
}
-(void) tick: (ccTime) dt
{
//It is recommended that a fixed time step is used with Box2D for stability
//of the simulation, however, we are using a variable time step here.
//You need to make an informed choice, the following URL is useful
//http://gafferongames.com/game-physics/fix-your-timestep/
int32 velocityIterations = 8;
int32 positionIterations = 1;
// Instruct the world to perform a single step of simulation. It is
// generally best to keep the time step and iterations fixed.
world->Step(dt, velocityIterations, positionIterations);
//Iterate over the bodies in the physics world
for (b2Body* b = world->GetBodyList(); b; b = b->GetNext())
{
if (b->GetUserData() != NULL) {
//Synchronize the AtlasSprites position and rotation with the corresponding body
CCSprite *myActor = (CCSprite*)b->GetUserData();
myActor.position = CGPointMake( b->GetPosition().x * PTM_RATIO, b->GetPosition().y * PTM_RATIO);
myActor.rotation = -1 * CC_RADIANS_TO_DEGREES(b->GetAngle());
}
}
}
// on "dealloc" you need to release all your retained objects
- (void) dealloc
{
// in case you have something to dealloc, do it in this method
delete world;
world = NULL;
delete m_debugDraw;
// don't forget to call "super dealloc"
[super dealloc];
}
@end
Any ideas? Thanks in advance.
The problem in your case is that your first box makes a 360-degree rotation around the anchor. This can happen to the first and last boxes. I think you need to restrict that 360-degree rotation on your joint: you have to define the maximum angle your box can reach. You can do this by using limits and a motor on your joints. The following is some code from the Box2D manual that sets up limits and a motor for a revolute joint:
b2RevoluteJointDef jointDef;
jointDef.Initialize(body1, body2, body1->GetWorldCenter());
jointDef.lowerAngle = -0.5f * b2_pi; // -90 degrees
jointDef.upperAngle = 0.25f * b2_pi; // 45 degrees
jointDef.enableLimit = true;
jointDef.maxMotorTorque = 10.0f;
jointDef.motorSpeed = 0.0f;
jointDef.enableMotor = true;
I hope it helps.
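As a rough sketch (not tested against the scene above), the same fields could be folded into the jd definition used in the Test method; the specific angle limits are assumptions, and Initialize() does not overwrite them:

b2RevoluteJointDef jd;
jd.collideConnected = false;
// Keep each box within a quarter turn of its neighbour so it cannot swing a full revolution
jd.enableLimit = true;
jd.lowerAngle = -0.25f * b2_pi;   // -45 degrees
jd.upperAngle =  0.25f * b2_pi;   //  45 degrees
// ... then, inside the loop, unchanged:
// jd.Initialize(prevBody, body, anchor);
// world->CreateJoint(&jd);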
How do I find the transformation for a static mesh?
I'll be using it for my picking function.
I have tried this, but it didn't work...
void StaticMesh::Render(void)
{
    D3DXMATRIX matWorld;
    D3DXMatrixIdentity(&matWorld);
    device->SetTransform(D3DTS_WORLD, &matWorld);

    for(DWORD i = 0; i < numMaterials; i++)    // loop through each subset
    {
        device->SetMaterial(&material[i]);     // set the material for the subset
        device->SetTexture(0, texture[i]);     // ...then set the texture
        mesh->DrawSubset(i);                   // draw the subset
    }

    s = &matWorld; // THIS WOULD BE THE TRANSFORMATION FOR THE OBJECT
}
Thanks in advance.
Edit 2:
Here is full class
#include "StdAfx.h"
#include "StaticMesh.h"
StaticMesh::StaticMesh(LPDIRECT3DDEVICE9* dev)
{
d3ddev=dev;
device = *d3ddev;
}
StaticMesh::~StaticMesh(void)
{
}
void StaticMesh::Render(void)
{
LPDIRECT3DDEVICE9 device=*d3ddev;
D3DXMATRIX matWorld;
D3DXMatrixIdentity(&matWorld);
device->SetTransform(D3DTS_WORLD, &matWorld);
for(DWORD i = 0; i < numMaterials; i++) // loop through each subset
{
device->SetMaterial(&material[i]); // set the material for the subset
device->SetTexture(0, texture[i]); // ...then set the texture
mesh->DrawSubset(i); // draw the subset
}
s = matWorld;
}
StaticMesh* StaticMesh::LoadXFile(LPCWSTR fileName, LPDIRECT3DDEVICE9* dev)
{
StaticMesh *obj = this;
obj = new StaticMesh(dev);
obj->Load(fileName);
return obj;
}
void StaticMesh::Load(LPCWSTR fileName)
{
D3DXLoadMeshFromX(fileName, // load this file
D3DXMESH_SYSTEMMEM, // load the mesh into system memory
device, // the Direct3D Device
NULL, // we aren't using adjacency
&bufMeshMaterial, // put the materials here
NULL, // we aren't using effect instances
&numMaterials, // the number of materials in this model
&mesh); // put the mesh here
// retrieve the pointer to the buffer containing the material information
D3DXMATERIAL* tempMaterials = (D3DXMATERIAL*)bufMeshMaterial->GetBufferPointer();
// create a new material buffer and texture for each material in the mesh
material = new D3DMATERIAL9[numMaterials];
texture = new LPDIRECT3DTEXTURE9[numMaterials];
for(DWORD i = 0; i < numMaterials; i++) // for each material...
{
// Copy the material
material[i] = tempMaterials[i].MatD3D;
// Set the ambient color for the material (D3DX does not do this)
material[i].Ambient = material[i].Diffuse;
// Create the texture if it exists - it may not
texture[i] = NULL;
if (tempMaterials[i].pTextureFilename)
{
D3DXCreateTextureFromFileA(device, tempMaterials[i].pTextureFilename,&texture[i]);
}
}
}
void StaticMesh::CleanUp(void)
{
mesh->Release();
}
EDIT 3:
void GUIDialog::Picking(HWND hWnd, int status)
{
LPDIRECT3DDEVICE9 device = *d3ddev;
D3DXMATRIX matProj;
POINT pt;
D3DVIEWPORT9 vp;
D3DXMATRIX *matWorld=NULL;
D3DXMATRIX matView;
GetCursorPos(&pt);
ScreenToClient(hWnd, &pt);
device->GetTransform(D3DTS_PROJECTION, &matProj);
device->GetViewport(&vp);
device->GetTransform(D3DTS_VIEW, &matView);
for(int i=0; (int)edit.size() > i; i++)
{
matWorld=&edit.at(i)->staticMesh->s;
// Use inverse of matrix
D3DXVECTOR3 rayPos((float)pt.x, (float)pt.y,0); // near-plane position
D3DXVECTOR3 rayDir((float)pt.x, (float)pt.y,1); // far-plane position
D3DXVec3Unproject(&rayPos,&rayPos,&vp,&matProj,&matView,matWorld);
D3DXVec3Unproject(&rayDir,&rayDir,&vp,&matProj,&matView,matWorld);
rayDir -= rayPos; // make a direction from the 2 positions
D3DXVec3Normalize(&rayDir,&rayDir);
if(FAILED(D3DXIntersect(edit.at(i)->staticMesh->mesh, &rayPos, &rayDir,
    &edit.at(i)->staticMesh->hasHit, NULL, NULL, NULL,
    &edit.at(i)->staticMesh->distanceToCollision, NULL, NULL)))
{
PostQuitMessage(0);
};
if(edit.at(i)->staticMesh->hasHit!=0&&status==WM_LBUTTONUP)
{
if(status==WM_LBUTTONUP)
EventProc(HIT, *edit.at(i));
}
}
}
Well, your code above is correct: matWorld IS the transform of the object. If you wish to transform a ray into object-local space (i.e. the space prior to the matWorld transformation), then you simply need to multiply your world-space ray by the inverse of matWorld ...
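As a minimal sketch of that last point, assuming rayPos and rayDir are still in world space (in the Picking code above, D3DXVec3Unproject already takes matWorld into account, so this extra step would only apply to a ray built without it):

// Transform a world-space ray into the mesh's local (pre-matWorld) space.
D3DXMATRIX invWorld;
D3DXMatrixInverse(&invWorld, NULL, &matWorld);

D3DXVECTOR3 localPos, localDir;
D3DXVec3TransformCoord(&localPos, &rayPos, &invWorld);   // points use the full transform
D3DXVec3TransformNormal(&localDir, &rayDir, &invWorld);  // directions ignore translation
D3DXVec3Normalize(&localDir, &localDir);
// localPos/localDir can then be passed to D3DXIntersect against the untransformed mesh.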