I am rendering a spline in DirectX 11, but I'm having an issue where it appears to be stuck in screen space, and I can't convince it to be in world space.
The spline is initially defined as a std::vector<DirectX::XMFLOAT3> of control points, which is expanded into another vector of the same type, called linePoints, holding the actual points along the spline. The vertex, index, and constant buffers are then created in this function:
void Spline::createBuffers(ID3D11Device* device)
{
    Vertex vertices[100];
    for (int i = 0; i < 100; i++)
    {
        Vertex v;
        v.position = linePoints.at(i);
        v.colour = XMFLOAT4(0, 0, 0, 1.0);
        vertices[i] = v;
    }

    D3D11_BUFFER_DESC bd;
    ZeroMemory(&bd, sizeof(D3D11_BUFFER_DESC));
    bd.Usage = D3D11_USAGE_DEFAULT;
    bd.ByteWidth = sizeof(Vertex) * linePoints.size();
    bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
    bd.CPUAccessFlags = 0;

    D3D11_SUBRESOURCE_DATA InitData;
    ZeroMemory(&InitData, sizeof(InitData));
    InitData.pSysMem = vertices;
    device->CreateBuffer(&bd, &InitData, &vertexBuffer);

    ZeroMemory(&bd, sizeof(D3D11_BUFFER_DESC));
    bd.Usage = D3D11_USAGE_DEFAULT;
    bd.CPUAccessFlags = 0;
    bd.ByteWidth = sizeof(WORD) * linePoints.size();
    bd.BindFlags = D3D11_BIND_INDEX_BUFFER;

    WORD indices[200];
    int count = 0;
    for (WORD i = 0; i < 100; i++)
    {
        indices[count] = i;
        indices[count + 1] = i + 1;
        count += 2;
    }

    ZeroMemory(&InitData, sizeof(InitData));
    InitData.pSysMem = indices;
    device->CreateBuffer(&bd, &InitData, &indexBuffer);

    ZeroMemory(&bd, sizeof(bd));
    bd.Usage = D3D11_USAGE_DEFAULT;
    bd.ByteWidth = sizeof(LineCBuffer);
    bd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
    bd.CPUAccessFlags = 0;
    device->CreateBuffer(&bd, nullptr, &constBuffer);
}
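(The LineCBuffer definition isn't shown; presumably it mirrors the HLSL cbuffer below with three matrices, something like this assumed sketch:)

// Assumed CPU-side layout matching cbuffer lineCBuffer : register(b0).
// Three XMMATRIX members give a 192-byte buffer, which satisfies the
// D3D11 requirement that constant buffer sizes be multiples of 16 bytes.
struct LineCBuffer
{
    DirectX::XMMATRIX world;
    DirectX::XMMATRIX view;
    DirectX::XMMATRIX projection;
};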
The draw function is:
void Spline::Draw(ID3D11PixelShader* pShader, ID3D11VertexShader* vShader, Camera& cam)
{
    context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_LINESTRIP);
    context->VSSetShader(vShader, nullptr, 0);
    context->PSSetShader(pShader, nullptr, 0);

    LineCBuffer lCB;
    lCB.world = XMMatrixIdentity();
    lCB.view = XMMatrixTranspose(cam.getView());
    lCB.projection = XMMatrixTranspose(cam.getProjection());
    context->UpdateSubresource(constBuffer, 0, nullptr, &lCB, 0, 0);
    context->VSSetConstantBuffers(0, 1, &constBuffer);

    UINT stride = sizeof(Vertex);
    UINT offset = 0;
    context->IASetVertexBuffers(0, 1, &vertexBuffer, &stride, &offset);
    context->IASetIndexBuffer(indexBuffer, DXGI_FORMAT_R16_UINT, 0);
    context->DrawIndexed(100, 0, 0);

    context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
}
And the entire shader file is:
cbuffer lineCBuffer : register(b0)
{
    matrix World;
    matrix View;
    matrix Projection;
};

struct VS_IN
{
    float3 position : POSITION;
    float4 colour : COLOUR;
};

struct VS_OUT
{
    float4 pos : SV_POSITION;
    float4 colour : COLOUR;
};

VS_OUT lineVertexShader(float3 position : POSITION, float4 colour : COLOUR)
{
    VS_OUT output = (VS_OUT)0;
    output.pos = mul(position, World);
    output.pos = mul(output.pos, View);
    output.pos = mul(output.pos, Projection);
    output.pos.w = 1.0f;
    output.colour = colour;
    return output;
}

float4 linePixelShader(VS_OUT input) : SV_TARGET
{
    return input.colour;
}
The issue is that the start of the line (especially when it is set to (0,0,0)) is anchored to the viewport. When the start of the line is at (0,0,0), it will not leave the centre of the screen, even when it should be offscreen.
I think you are doing something wrong with your multiplication.
VS_OUT lineVertexShader(float3 position : POSITION, float4 colour : COLOUR)
{
    VS_OUT output = (VS_OUT)0;
    output.pos = mul(position, World);
    output.pos = mul(output.pos, View);
    output.pos = mul(output.pos, Projection);
    output.pos.w = 1.0f;
    output.colour = colour;
    return output;
}
You use a float3 as the position input. A position is a point, so you should extend it to a
float4(position, 1.0f)
to treat it as a point in 3D space. If your float3 were a direction vector instead, you would create a
float4(position, 0.0f)
So your vertex shader should look like the following:
VS_OUT lineVertexShader(float3 position : POSITION, float4 colour : COLOUR)
{
    VS_OUT output;
    output.pos = mul(float4(position, 1.0), mul(mul(World, View), Projection));
    output.colour = colour;
    return output;
}
One more thing: do not set pos.w to 1! The rasterizer performs the perspective divide automatically on the value written to SV_POSITION, and the resulting homogeneous value is then passed to the pixel shader. Sometimes you really do want to force z and w to 1, for example for a cubemap rendered at maximum distance, but here it is just another error.
Also, if you don't need World, View, and Projection in separate multiplications, why not precompute the combined matrix on the CPU and push a single worldViewProj matrix to your shaders?
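For instance, the combined matrix could be built once per draw on the CPU; a minimal sketch, assuming the Camera accessors from the question and a constant buffer that holds only one matrix:

#include <d3d11.h>
#include <DirectXMath.h>
using namespace DirectX;

// Assumed constant buffer layout holding only the combined matrix.
struct LineCBufferCombined
{
    XMMATRIX worldViewProj;
};

// Build world * view * projection on the CPU, transpose once for HLSL's
// default column-major packing, and upload a single matrix.
void updateLineConstants(ID3D11DeviceContext* context, ID3D11Buffer* constBuffer,
                         const XMMATRIX& world, Camera& cam)
{
    LineCBufferCombined cb;
    cb.worldViewProj = XMMatrixTranspose(world * cam.getView() * cam.getProjection());
    context->UpdateSubresource(constBuffer, 0, nullptr, &cb, 0, 0);
}

The vertex shader would then declare a single matrix and compute output.pos = mul(float4(position, 1.0f), worldViewProj).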
Good luck.
I'm new to DirectX and I need to set up opacity in my shaders. I don't use textures or anything; I just need to add opacity to whole objects based on one variable in the shader.
I've tried simply putting this variable into the shader's output color alpha channel, but it didn't work, and I suspect the solution to this problem is not in the shaders. This is my shader so far:
cbuffer ConstantBuffer : register(b0)
{
    matrix World;
    matrix View;
    matrix Projection;
    float4 vLigthDir[2];
    float4 vLigthColor[2];
    float4 vOutputColor;
    float intensity;
    float3 colors;
}

struct VS_INPUT
{
    float4 pos : POSITION;
    float3 norm : NORMAL;
};

struct PS_INPUT
{
    float4 pos : SV_POSITION;
    float3 norm : TEXCOORD0;
};

PS_INPUT VS(VS_INPUT input)
{
    PS_INPUT output = (PS_INPUT) 0;
    output.pos = mul(input.pos, World);
    output.pos = mul(output.pos, View);
    output.pos = mul(output.pos, Projection);
    output.norm = mul(input.norm, World);
    return output;
}

float4 PS(PS_INPUT input) : SV_Target
{
    float4 finalColor = 0;
    finalColor.r = colors.r;
    finalColor.g = colors.g;
    finalColor.b = colors.b;
    finalColor.a = intensity;
    return finalColor;
}

float4 PSSolid(PS_INPUT input) : SV_Target
{
    return vOutputColor;
}
//Create BlendState
D3D11_BLEND_DESC blendDesc;
blendDesc.AlphaToCoverageEnable = false;
blendDesc.IndependentBlendEnable = false;
blendDesc.RenderTarget[0].BlendEnable = true;
blendDesc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
blendDesc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ZERO;
blendDesc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
hr = g_pd3dDevice->CreateBlendState(&blendDesc, &pBlendState);
//Using BlendState
float blendFactor[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
UINT sampleMask = 0xffffffff;
g_pImmediateContext->OMGetBlendState(&pBlendState, blendFactor, &sampleMask);
///////////Rendering geometry part///////////
g_pImmediateContext->OMGetBlendState(NULL, NULL, NULL);
I expect some objects to have opacity, but instead nothing happens at all; everything is just a solid color.
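(For reference: a blend state is applied with OMSetBlendState, whereas the snippet above calls OMGetBlendState. A minimal sketch of binding and later resetting it, reusing the g_pImmediateContext and pBlendState variables from the code above:)

// Apply the blend state before drawing the transparent objects.
float blendFactor[4] = { 1.0f, 1.0f, 1.0f, 1.0f };
UINT sampleMask = 0xffffffff;
g_pImmediateContext->OMSetBlendState(pBlendState, blendFactor, sampleMask);

// ... draw transparent geometry here ...

// Restore the default blend state afterwards.
g_pImmediateContext->OMSetBlendState(nullptr, nullptr, 0xffffffff);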
Basically I am trying to render a scene to a texture, as in this OpenGL tutorial here, but in DirectX 11, and I'm facing some issues:
Absolutely nothing is rendered when I launch the program, and I don't know why.
The only thing the texture displays 'correctly' is the clear color.
I have examined the executable in RenderDoc, and in the captured frame the back buffer draws the quad, and the texture on it displays the scene correctly!
Source code peek:
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(D3D11_TEXTURE2D_DESC));
texDesc.Width = Data.Width;
texDesc.Height = Data.Height;
texDesc.Format = R32G32B32A32_FLOAT;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.CPUAccessFlags = 0;
texDesc.ArraySize = 1;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_RENDER_TARGET;
texDesc.MiscFlags = 0;
texDesc.MipLevels = 1;

if (Data.Img_Data_Buf == NULL)
{
    if (FAILED(DX11Context::GetDevice()->CreateTexture2D(&texDesc, NULL, &result->tex2D)))
    {
        Log.Error("[DirectX] Texture2D Creation Failed for Null-ed Texture2D!\n");
        return;
    }

    D3D11_SHADER_RESOURCE_VIEW_DESC srvDesc;
    srvDesc.Format = texDesc.Format;
    srvDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
    srvDesc.Texture2D.MostDetailedMip = 0;
    srvDesc.Texture2D.MipLevels = 1;
    DX11Context::GetDevice()->CreateShaderResourceView(result->tex2D, &srvDesc, &result->resourceView);
    return;
}
//depth stencil texture
D3D11_TEXTURE2D_DESC texDesc;
{
    texDesc.Width = size.x;
    texDesc.Height = size.y;
    texDesc.MipLevels = 1;
    texDesc.ArraySize = 1;
    texDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
    texDesc.SampleDesc.Count = 1;
    texDesc.SampleDesc.Quality = 0;
    texDesc.Usage = D3D11_USAGE_DEFAULT;
    texDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
    texDesc.CPUAccessFlags = 0;
    texDesc.MiscFlags = 0;
}

if (FAILED(API::DirectX::DX11Context::GetDevice()->CreateTexture2D(&texDesc, nullptr, &depthstenciltex)))
{
    Log.Error("[DX11RenderTarget] Failed to create DepthStencilTexture for render-target!\n");
    //Return or the next call will fail too
    return;
}

if (FAILED(API::DirectX::DX11Context::GetDevice()->CreateDepthStencilView(depthstenciltex, nullptr, &depthstencilview)))
{
    Log.Error("[DX11RenderTarget] Failed to create DepthStencilView for render-target!\n");
}

//render target
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc;
ZeroMemory(&renderTargetViewDesc, sizeof(D3D11_RENDER_TARGET_VIEW_DESC));
renderTargetViewDesc.Format = texDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;

ID3D11RenderTargetView* rtv;
if (FAILED(API::DirectX::DX11Context::GetDevice()->CreateRenderTargetView(texture->tex2D, &renderTargetViewDesc, &rtv)))
{
    Log.Error("[DX11RenderTarget] Failed to create render-target-view (RTV)!\n");
    return;
}

//binding
Context->OMSetRenderTargets(1, &rtv, rt->depthstenciltex);
Shaders:
std::string VertexShader = R"(struct VertexInputType
{
    float4 position : POSITION;
    float2 tex : TEXCOORD;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD;
};

cbuffer NE_Camera : register(b0)
{
    matrix Model;
    matrix View;
    matrix Projection;
};

PixelInputType main(VertexInputType input)
{
    PixelInputType output;

    // Calculate the position of the vertex against the world, view, and projection matrices.
    output.position = mul(Model, input.position);
    output.position = mul(View, output.position);
    output.position = mul(Projection, output.position);

    // Store the input texture coordinates for the pixel shader to use.
    output.tex = input.tex;
    return output;
})";

std::string PixelShader = R"(
struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD;
};

Texture2D NE_Tex_Diffuse : register(t0);
SamplerState NE_Tex_Diffuse_Sampler : register(s0);

float4 main(PixelInputType input) : SV_TARGET
{
    return NE_Tex_Diffuse.Sample(NE_Tex_Diffuse_Sampler, input.tex);
}
)";

std::string ScreenVertexShader = R"(struct VertexInputType
{
    float2 position : POSITION;
    float2 tex : TEXCOORD;
};

struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD;
};

PixelInputType main(VertexInputType input)
{
    PixelInputType output;

    // Calculate the position of the vertex; the full-screen quad is already in clip space.
    output.position = float4(input.position.x, input.position.y, 0.0f, 1.0f);

    // Store the input texture coordinates for the pixel shader to use.
    output.tex = input.tex;
    return output;
})";

std::string ScreenPixelShader = R"(
struct PixelInputType
{
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD;
};

Texture2D ScreenTexture : register(t0);
SamplerState ScreenTexture_Sampler : register(s0);

float4 main(PixelInputType input) : SV_TARGET
{
    return float4(ScreenTexture.Sample(ScreenTexture_Sampler, input.tex).rgb, 1.0f);
}
)";
Full Source Code
Also, I captured a frame with the Visual Studio graphics debugger and noticed that for the render-to-texture draw call the pixel shader stage reports "stage didn't run, no output".
Note: I know that the scene should be flipped in DirectX.
I have found the bug causing this problem: I wasn't clearing the depth stencil view when rendering. I wonder why clearing the DSV is essential for render-target output.
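(For reference, the per-frame clear before drawing into the off-screen target would look roughly like this; a minimal sketch reusing the rtv, depthstencilview, and Context names from the snippets above:)

// Bind the off-screen render target together with its depth stencil view,
// then clear both before drawing. If the DSV is never cleared, stale depth
// values from earlier frames can make the depth test reject every fragment.
float clearColor[4] = { 0.2f, 0.3f, 0.3f, 1.0f };
Context->OMSetRenderTargets(1, &rtv, depthstencilview);
Context->ClearRenderTargetView(rtv, clearColor);
Context->ClearDepthStencilView(depthstencilview, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);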
I have an application based on the Windows 8 DirectX template from Visual Studio, and I'm trying to draw a texture with an alpha channel using the following D3D11_BLEND_DESC:
D3D11_BLEND_DESC blendDesc{};
blendDesc.RenderTarget[0].BlendEnable = TRUE;
blendDesc.RenderTarget[0].SrcBlend = D3D11_BLEND_SRC_ALPHA;
blendDesc.RenderTarget[0].DestBlend = D3D11_BLEND_INV_SRC_ALPHA;
blendDesc.RenderTarget[0].SrcBlendAlpha = D3D11_BLEND_INV_DEST_ALPHA;
blendDesc.RenderTarget[0].DestBlendAlpha = D3D11_BLEND_ONE;
blendDesc.RenderTarget[0].BlendOp = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].BlendOpAlpha = D3D11_BLEND_OP_ADD;
blendDesc.RenderTarget[0].RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL;
ThrowIfFailed(device->CreateBlendState(&blendDesc, &_blendState));
And I'm binding this blend state this way:
float blendFactor[4] = { 0.0f, 0.0f, 1.0f, 1.0f };
UINT sampleMask = 0xffffffff;
context->OMSetBlendState(_blendState.Get(), blendFactor, sampleMask);
Here is my Vertex Shader:
cbuffer ModelViewProjectionConstantBuffer : register(b0)
{
    matrix model;
    matrix view;
    matrix projection;
};

struct VertexShaderInput
{
    float3 pos : POSITION;
    float2 texcoord : TEXCOORD;
};

struct PixelShaderInput
{
    float4 pos : SV_POSITION;
    float2 texcoord : TEXCOORD;
};

PixelShaderInput main(VertexShaderInput input)
{
    PixelShaderInput output;
    float4 pos = float4(input.pos, 1.0f);
    pos = mul(pos, model);
    pos = mul(pos, view);
    pos = mul(pos, projection);
    output.pos = pos;
    output.texcoord = input.texcoord;
    return output;
}
Pixel Shader:
Texture2D tex0;
SamplerState s0;

struct PixelShaderInput
{
    float4 pos : SV_POSITION;
    float2 texcoord : TEXCOORD;
};

float4 main(PixelShaderInput input) : SV_TARGET
{
    float4 color = tex0.Sample(s0, input.texcoord);
    return color;
}
And this is my texture:
D3D11_TEXTURE2D_DESC textureDesc{};
textureDesc.Width = width;
textureDesc.Height = height;
textureDesc.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
textureDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
textureDesc.MipLevels = textureDesc.ArraySize = 1;
textureDesc.SampleDesc.Count = 1;
ThrowIfFailed(device->CreateTexture2D(&textureDesc, NULL, &_texture));
ThrowIfFailed(device->CreateShaderResourceView(_texture.Get(), NULL, &_textureView));
But the problem is that the blending is not working properly. A texture only blends with the background, not with another texture; e.g. if I call ClearRenderTargetView with blue as the clear color, the partially transparent texture comes out bluish but is not blended with an overlapping texture.
Images with examples of the problem: http://imgur.com/gallery/vmMyk
Hi,
I need some help with rendering 2D objects in a 3D scene with a 3D camera. I think I managed to map the 2D coordinates into LH world coordinates. However, my rendered 2D objects are only in the correct place when the camera is at [0.0f, 0.0f, 0.0f]. In every other position, the location of the 2D objects in the scene is malformed. I think my matrices are screwed up, but I don't know where to look further. I'd appreciate good ideas; please comment if something's missing for you and I'll edit the main post to provide more information.
I'm using a simple 3D color HLSL shader (VS and PS version 4.0) with alpha blending for the bigger triangle:
cbuffer ConstantBuffer : register(b0)
{
    matrix World;
    matrix View;
    matrix Projection;
}

struct VS_INPUT
{
    float4 Pos : POSITION;
    float4 Color : COLOR;
};

struct PS_INPUT
{
    float4 Pos : SV_POSITION;
    float4 Color : COLOR;
};

PS_INPUT VS(VS_INPUT input)
{
    PS_INPUT output = (PS_INPUT)0;
    input.Pos.w = 1.0f;
    output.Pos = mul(input.Pos, World);
    output.Pos = mul(output.Pos, View);
    output.Pos = mul(output.Pos, Projection);
    output.Color = input.Color;
    return output;
}

float4 PS(PS_INPUT input) : SV_Target
{
    return input.Color;
}
That's my Vertex data struct:
struct Vertex
{
    DirectX::XMFLOAT3 position;
    DirectX::XMFLOAT4 color;

    Vertex() {};
    Vertex(DirectX::XMFLOAT3 aPosition, DirectX::XMFLOAT4 aColor)
        : position(aPosition)
        , color(aColor)
    {};
};
Render call for object:
bool PrimitiveMesh::Draw()
{
    unsigned int stride = sizeof(Vertex);
    unsigned int offset = 0;

    D3DSystem::GetD3DDeviceContext()->IASetVertexBuffers(0, 1, &iVertexBuffer, &stride, &offset);
    D3DSystem::GetD3DDeviceContext()->IASetIndexBuffer(iIndexBuffer, DXGI_FORMAT_R32_UINT, 0);
    D3DSystem::GetD3DDeviceContext()->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);

    return true;
}
Draw call with initialization:
static PrimitiveMesh* mesh;
if (mesh == 0)
{
    std::vector<PrimitiveMesh::Vertex> vertices;
    mesh = new PrimitiveMesh();
    DirectX::XMFLOAT4 color = { 186 / 256.0f, 186 / 256.0f, 186 / 256.0f, 0.8f };

    vertices.push_back({ DirectX::XMFLOAT3(0.0f, 0.0f, 0.0f), color });
    vertices.push_back({ DirectX::XMFLOAT3(0.0f, 600.0f, 0.0f), color });
    vertices.push_back({ DirectX::XMFLOAT3(800.0f, 600.0f, 0.0f), color });

    mesh->SetVerticesAndIndices(vertices);
}
// Getting clean matrices here:
D3D::Matrices(world, view, projection, ortho);
iGI->TurnZBufferOff();
iGI->TurnOnAlphaBlending();
mesh->Draw();
XMMATRIX view2D = Camera::View2D();
iColorShader->Render(iGI->GetContext(), 3, &world, &view2D, &ortho);
iGI->TurnZBufferOn();
These are my 2D calculations for camera:
up = DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 0.0f);
lookAt = DirectX::XMVectorSet(0.0f, 0.0f, 1.0f, 0.0f);
rotationMatrix = DirectX::XMMatrixRotationRollPitchYaw(0.0f, 0.0f, 0.0f); // (pitch, yaw, roll);
up = DirectX::XMVector3TransformCoord(up, rotationMatrix);
lookAt = DirectX::XMVector3TransformCoord(lookAt, rotationMatrix) + position;
view2D = DirectX::XMMatrixLookAtLH(position, lookAt, up);
I'd appreciate any help.
Kind regards.
With shaders, you are not forced to use matrices; you have the flexibility to simplify the problem.
Let's say you render 2D objects using coordinates in pixels. The only requirement is to scale and offset them back into normalized projective space.
A vertex shader could be as short as that :
cbuffer ConstantBuffer : register(b0) {
    float2 rcpDim; // 1 / renderTargetSize
}

PS_INPUT VS(VS_INPUT input) {
    PS_INPUT output;
    output.Pos.xy = input.Pos.xy * rcpDim * 2; // from pixels to [0..2]
    output.Pos.xy -= 1;                        // to [-1..1]
    output.Pos.y *= -1;                        // because top left in texture space is bottom left in projective space
    output.Pos.zw = float2(0, 1);
    output.Color = input.Color;
    return output;
}
You can of course build a set of matrices that achieves the same result with your original shader: just set World and View to identity and the projection to an orthographic projection with XMMatrixOrthographicOffCenterLH(0, width, 0, height, 0, 1). But as you are beginning with 3D programming, you will soon have to learn to deal with multiple shaders anyway, so take it as an exercise.
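On the CPU side that matrix setup might look like this (a minimal sketch, assuming the three-matrix constant buffer from the original shader and a transpose before upload for HLSL's default column-major packing):

#include <DirectXMath.h>
using namespace DirectX;

// Assumed CPU-side mirror of the original shader's cbuffer.
struct ConstantBuffer
{
    XMMATRIX World;
    XMMATRIX View;
    XMMATRIX Projection;
};

// World and View stay identity; Projection maps pixel coordinates
// (0..width, 0..height) into clip space.
ConstantBuffer makeOrtho2DConstants(float width, float height)
{
    ConstantBuffer cb;
    cb.World = XMMatrixIdentity();
    cb.View = XMMatrixIdentity();
    cb.Projection = XMMatrixTranspose(
        XMMatrixOrthographicOffCenterLH(0.0f, width, 0.0f, height, 0.0f, 1.0f));
    return cb;
}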
Well, I fixed my problem. For some weird reason, DirectXMath was generating a false XMMATRIX: my XMMatrixOrthographicLH() call was completely incorrect despite good parameters. I solved the problem with the classic definition of the orthographic matrix, found in this article (definition in Fig. 10):
auto orthoMatrix = DirectX::XMMatrixIdentity();
orthoMatrix.r[0].m128_f32[0] = 2.0f / Engine::VideoSettings::Current()->WindowWidth();
orthoMatrix.r[1].m128_f32[1] = 2.0f / Engine::VideoSettings::Current()->WindowHeight();
orthoMatrix.r[2].m128_f32[2] = -(2.0f / (screenDepth - screenNear));
orthoMatrix.r[2].m128_f32[3] = -(screenDepth + screenNear) / (screenDepth - screenNear);
galop1n gives a good solution, but on my system the constant buffer
cbuffer ConstantBuffer : register(b0) {
    float2 rcpDim; // 1 / renderTargetSize
}
NEEDS to have a size that is a multiple of 16 bytes, so it is padded like this:
struct VS_CONSTANT_BUFFER
{
    DirectX::XMFLOAT2 rcpDim;
    DirectX::XMFLOAT2 rcpDim2; // padding to reach 16 bytes
};
// Supply the vertex shader constant data.
VS_CONSTANT_BUFFER VsConstData;
VsConstData.rcpDim = { 2.0f / w,2.0f / h};
// Fill in a buffer description.
D3D11_BUFFER_DESC cbDesc;
ZeroMemory(&cbDesc, sizeof(cbDesc));
cbDesc.ByteWidth = sizeof(VS_CONSTANT_BUFFER);
cbDesc.Usage = D3D11_USAGE_DYNAMIC;
cbDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
cbDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
cbDesc.MiscFlags = 0;
cbDesc.StructureByteStride = 0;
// Fill in the subresource data.
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory(&InitData, sizeof(InitData));
InitData.pSysMem = &VsConstData;
InitData.SysMemPitch = 0;
InitData.SysMemSlicePitch = 0;
// Create the buffer.
HRESULT hr = pDevice->CreateBuffer(&cbDesc, &InitData, &pConstantBuffer11);
or with the struct explicitly aligned:
__declspec(align(16))
struct VS_CONSTANT_BUFFER
{
    DirectX::XMFLOAT2 rcpDim;
};
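Since the buffer above is created with D3D11_USAGE_DYNAMIC and CPU write access, later updates (e.g. when the render target is resized) would go through Map/Unmap rather than UpdateSubresource; a minimal sketch reusing the names from the snippet above:

// Update the dynamic constant buffer when the render target size changes.
// D3D11_MAP_WRITE_DISCARD hands back fresh memory, so write the whole struct.
void updateRcpDim(ID3D11DeviceContext* pContext, ID3D11Buffer* pConstantBuffer11,
                  float w, float h)
{
    VS_CONSTANT_BUFFER data = {};
    data.rcpDim = { 2.0f / w, 2.0f / h };

    D3D11_MAPPED_SUBRESOURCE mapped = {};
    if (SUCCEEDED(pContext->Map(pConstantBuffer11, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped)))
    {
        memcpy(mapped.pData, &data, sizeof(data));
        pContext->Unmap(pConstantBuffer11, 0);
    }
}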
I'm trying to create a cube map in DirectX 9, but for some reason it's not working. I've used DirectX's texture utility to create a .dds texture file for the cube, but when I draw it, it only draws a solid color. Here's the code I've written:
SkyBox.h
#pragma once
#include <D3DX9Mesh.h>
#include "DirectX.h"

class SkyBox {
public:
    SkyBox(LPCSTR textureFile);
    ~SkyBox();
    void Draw();

protected:
    IDirect3DCubeTexture9* texture;
    LPD3DXMESH mesh;
};
SkyBox.cpp
#include"SkyBox.h"
SkyBox::SkyBox(LPCSTR textureFile)
{
D3DXCreateBox(DirectX::device, 1.0f, 1.0f, 1.0f, &mesh, NULL);
D3DXCreateCubeTextureFromFile(DirectX::device, textureFile, &texture);
}
SkyBox::~SkyBox()
{
mesh->Release();
texture->Release();
}
void SkyBox::Draw()
{
D3DXHANDLE textureHandle = DirectX::currentShaderEffect->GetParameterByName(0, "tex0");
DirectX::currentShaderEffect->SetTexture(textureHandle, texture);
DirectX::currentShaderEffect->CommitChanges();
UINT passNum = 5;
DirectX::currentShaderEffect->Begin(&passNum, 0);
DirectX::currentShaderEffect->BeginPass(5);
mesh->DrawSubset(0);
DirectX::currentShaderEffect->EndPass();
DirectX::currentShaderEffect->End();
}
And this is my shader for the Cube Map:
uniform extern float4x4 mvp;
uniform extern texture tex0;

struct SkyboxVS
{
    float4 pos : POSITION0;
    float3 uv0 : TEXCOORD0;
};

sampler SkyBoxTex = sampler_state
{
    Texture = <tex0>;
    MinFilter = LINEAR;
    MagFilter = LINEAR;
    MipFilter = LINEAR;
    AddressU = WRAP;
    AddressV = WRAP;
};

SkyboxVS VertexSkybox(float3 position : POSITION0, float3 texCoord : TEXCOORD0)
{
    SkyboxVS skyVS = (SkyboxVS)0;
    skyVS.pos = mul(float4(position, 1.0f), mvp);
    skyVS.uv0 = texCoord;
    return skyVS;
}

float4 PixelSkybox(float3 texCoord : TEXCOORD0) : COLOR
{
    float4 color = texCUBE(SkyBoxTex, texCoord);
    return color;
}

technique TransformTech
{
    pass P5
    {
        vertexShader = compile vs_2_0 VertexSkybox();
        pixelShader = compile ps_2_0 PixelSkybox();

        ZFunc = Always;
        StencilEnable = true;
        StencilFunc = Always;
        StencilPass = Replace;
        StencilRef = 0;
    }
}
Here's some sample code:
Sky::Sky(const std::string& envmapFilename, float skyRadius)
    : mRadius(skyRadius)
{
    HR(D3DXCreateSphere(gd3dDevice, skyRadius, 30, 30, &mSphere, 0));
    HR(D3DXCreateCubeTextureFromFile(gd3dDevice, envmapFilename.c_str(), &mEnvMap));

    ID3DXBuffer* errors = 0;
    HR(D3DXCreateEffectFromFile(gd3dDevice, "sky.fx", 0, 0, 0, 0, &mFX, &errors));
    if (errors)
        MessageBox(0, (char*)errors->GetBufferPointer(), 0, 0);

    mhTech = mFX->GetTechniqueByName("SkyTech");
    mhWVP = mFX->GetParameterByName(0, "gWVP");
    mhEnvMap = mFX->GetParameterByName(0, "gEnvMap");

    // Set effect parameters that do not vary.
    HR(mFX->SetTechnique(mhTech));
    HR(mFX->SetTexture(mhEnvMap, mEnvMap));
}

void Sky::draw()
{
    // Sky always centered about camera's position.
    D3DXMATRIX W;
    D3DXVECTOR3 p = gCamera->pos();
    D3DXMatrixTranslation(&W, p.x, p.y, p.z);
    HR(mFX->SetMatrix(mhWVP, &(W*gCamera->viewProj())));

    UINT numPasses = 0;
    HR(mFX->Begin(&numPasses, 0));
    HR(mFX->BeginPass(0));
    HR(mSphere->DrawSubset(0));
    HR(mFX->EndPass());
    HR(mFX->End());
}
And shader code:
OutputVS EnvMapVS(float3 posL : POSITION0, float3 normalL : NORMAL0, float2 tex0 : TEXCOORD0)
{
    // Zero out our output.
    OutputVS outVS = (OutputVS)0;

    // Transform normal to world space.
    outVS.normalW = mul(float4(normalL, 0.0f), gWorldInvTrans).xyz;

    // Transform vertex position to world space.
    float3 posW = mul(float4(posL, 1.0f), gWorld).xyz;

    // Compute the unit vector from the vertex to the eye.
    outVS.toEyeW = gEyePosW - posW;

    // Transform to homogeneous clip space.
    outVS.posH = mul(float4(posL, 1.0f), gWVP);

    // Pass on texture coordinates to be interpolated in rasterization.
    outVS.tex0 = tex0;

    // Done--return the output.
    return outVS;
}

float4 EnvMapPS(float3 normalW : TEXCOORD0,
                float3 toEyeW : TEXCOORD1,
                float2 tex0 : TEXCOORD2) : COLOR
{
    // Interpolated normals can become unnormal--so normalize.
    normalW = normalize(normalW);
    toEyeW = normalize(toEyeW);

    // Light vector is opposite the direction of the light.
    float3 lightVecW = -gLight.dirW;

    // Compute the reflection vector.
    float3 r = reflect(-lightVecW, normalW);

    // Determine how much (if any) specular light makes it into the eye.
    float t = pow(max(dot(r, toEyeW), 0.0f), gMtrl.specPower);

    // Determine the diffuse light intensity that strikes the vertex.
    float s = max(dot(lightVecW, normalW), 0.0f);

    // Get the texture color.
    float4 texColor = tex2D(TexS, tex0);

    // Get the reflected color.
    float3 envMapTex = reflect(-toEyeW, normalW);
    float3 reflectedColor = texCUBE(EnvMapS, envMapTex);

    // Weighted average between the reflected color, and usual
    // diffuse/ambient material color modulated with the texture color.
    float3 ambientMtrl = gReflectivity*reflectedColor + (1.0f-gReflectivity)*(gMtrl.ambient*texColor);
    float3 diffuseMtrl = gReflectivity*reflectedColor + (1.0f-gReflectivity)*(gMtrl.diffuse*texColor);

    // Compute the ambient, diffuse and specular terms separately.
    float3 spec = t*(gMtrl.spec*gLight.spec).rgb;
    float3 diffuse = s*(diffuseMtrl*gLight.diffuse.rgb);
    float3 ambient = ambientMtrl*gLight.ambient;
    float3 final = ambient + diffuse + spec;

    // Output the color and the alpha.
    return float4(final, gMtrl.diffuse.a*texColor.a);
}