Hello, I recently tried to learn DirectX 11, but my program does not draw anything.
The only thing I get is the window with the background color I have chosen.
I have divided my program into a library (engine) and a regular project.
The library contains a model class, a shader class, and a DirectX init function.
S3DData is just a struct holding all the relevant objects, e.g. the swap chain.
static bool initDX(logfile* errorlog, S3DData *data){
D3D_FEATURE_LEVEL featureLevels[] = {
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0
};
UINT numFeatureLevels = 3;
D3D_FEATURE_LEVEL featureLevel = D3D_FEATURE_LEVEL_11_0;
HRESULT result = ERROR_SUCCESS;
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
//swapchain and device
bufferDesc.Height = data->WindowHeight;
bufferDesc.Width = data->WindowWidth;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.RefreshRate.Numerator = 60;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.OutputWindow = data->Handle;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.Windowed = data->Windowed;
swapChainDesc.BufferCount = 1;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.SampleDesc.Count = 1;
result = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, NULL, NULL,
D3D11_SDK_VERSION, &swapChainDesc, &data->SwapChain, &data->Device, NULL, &data->DeviceContext);
if(FAILED(result)){
std::string error;
errorlog->write("failed to create swapchain or device:");
if(result == E_INVALIDARG)
error = "invalid argument";
else if(result == E_OUTOFMEMORY)
error = " no memory";
else if(result == DXGI_ERROR_MORE_DATA)
error = " more data needed for buffer";
else if(result == E_NOTIMPL)
error = " not implemented";
else if(result == DXGI_ERROR_INVALID_CALL)
error = " invalid call";
else
error = std::to_string((unsigned int)result);
errorlog->write(error);
return false;
}
//back buffer and rendertargetview
ID3D11Texture2D *backbuffer;
result = data->SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&backbuffer);
if(FAILED(result)){
errorlog->write("failed to get backbuffer");
return false;
}
result = data->Device->CreateRenderTargetView(backbuffer, NULL, &data->RenderTargetView);
if(FAILED(result)){
errorlog->write("failed to create render target view");
return false;
}
data->DeviceContext->OMSetRenderTargets(1, &data->RenderTargetView, nullptr);
backbuffer->Release();
ZeroMemory(&data->viewport, sizeof(D3D11_VIEWPORT));
data->viewport.Height = data->WindowHeight;
data->viewport.Width = data->WindowWidth;
data->viewport.TopLeftX = 0;
data->viewport.TopLeftY = 0;
data->DeviceContext->RSSetViewports(1, &data->viewport);
errorlog->write("directx success");
return true;
}
This function basically creates the device, the swap chain, and the device context, and sets the render target and the viewport.
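For reference, a D3D11_VIEWPORT is usually filled out completely, including the depth range; a small sketch of a typical setup (the width/height values and the context pointer are placeholders, not taken from the code above):
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0.0f;
viewport.TopLeftY = 0.0f;
viewport.Width    = 800.0f;   // placeholder window width
viewport.Height   = 600.0f;   // placeholder window height
viewport.MinDepth = 0.0f;     // start of the depth range
viewport.MaxDepth = 1.0f;     // end of the depth range
deviceContext->RSSetViewports(1, &viewport);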
The second function is the shader init function:
bool shader::init(std::string vsFile, std::string psFile, S3DData * data){
std::ofstream output;
output.open("shaderErrorLog.txt", std::ios::binary);
_S3DData = data;
_pixelShader = nullptr;
_vertexShader = nullptr;
_layout = nullptr;
HRESULT result;
ID3D10Blob *errorMsg, *pixelShader, *vertexShader;
unsigned int numElements;
errorMsg = 0;
pixelShader = 0;
vertexShader = 0;
result = D3DX11CompileFromFile(vsFile.c_str(), 0, 0, "VS", "vs_5_0", 0, 0, 0, &vertexShader, &errorMsg, 0);
if(FAILED(result)){
if(errorMsg != nullptr){
char *compilerErrors = (char*)errorMsg->GetBufferPointer();
unsigned int size = errorMsg->GetBufferSize();
output.write(compilerErrors, size);
}
else
{
std::string error ="failed to find file";
output.write(error.c_str(), error.size());
}
return false;
}
result = D3DX11CompileFromFile(psFile.c_str(), 0, 0, "PS", "ps_5_0", 0, 0, 0, &pixelShader, &errorMsg, 0);
if(FAILED(result)){
if(errorMsg){
char *compilerErrors = (char*)errorMsg->GetBufferPointer();
unsigned int size = errorMsg->GetBufferSize();
output.write(compilerErrors, size);
}
else
{
std::string noFileMsg = "file " + psFile + " not found";
output.write(noFileMsg.c_str(), noFileMsg.size());
}
return false;
}
result = _S3DData->Device->CreateVertexShader(vertexShader->GetBufferPointer(), vertexShader->GetBufferSize(), nullptr, &_vertexShader);
if(FAILED(result)){
return false;
}
result = _S3DData->Device->CreatePixelShader(pixelShader->GetBufferPointer(), pixelShader->GetBufferSize(), nullptr, &_pixelShader);
if(FAILED(result)){
return false;
}
//layout of vertex
//in case of color.fx position and color
D3D11_INPUT_ELEMENT_DESC layout[] ={
{"POSITION",0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
//get num of elements
numElements = 2;
result = _S3DData->Device->CreateInputLayout(layout, numElements, vertexShader->GetBufferPointer(), vertexShader->GetBufferSize(), &_layout);
if(FAILED(result))
return false;
vertexShader->Release();
vertexShader = 0;
pixelShader->Release();
pixelShader = 0;
std::string success = "shader init : success";
output.write(success.c_str() , success.size());
_S3DData->DeviceContext->IASetInputLayout(_layout);
_S3DData->DeviceContext->VSSetShader(_vertexShader, 0, 0);
_S3DData->DeviceContext->PSSetShader(_pixelShader, 0, 0);
return true;
}
These are the members of the shader class:
ID3D11VertexShader *_vertexShader;
ID3D11PixelShader *_pixelShader;
ID3D11InputLayout *_layout;
S3DData *_S3DData;
This function creates the shaders, and since I only have one shader for now, it also sets the shaders and the input layout.
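As an aside, the element count passed to CreateInputLayout is usually taken from the descriptor array itself rather than written by hand; a small sketch with the same layout (device, vertexShaderBlob, and inputLayout are placeholder names):
D3D11_INPUT_ELEMENT_DESC layout[] = {
    {"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
UINT numElements = ARRAYSIZE(layout);   // 1 for this layout, and it stays correct if elements are added later
HRESULT hr = device->CreateInputLayout(layout, numElements,
    vertexShaderBlob->GetBufferPointer(), vertexShaderBlob->GetBufferSize(), &inputLayout);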
The last function is the model init function:
bool model::init(S3DData *data){
_S3DData = data;
HRESULT result;
vertex *vertexBuffer;
unsigned long* indexBuffer;
D3D11_BUFFER_DESC indexDesc, vertexDesc;
D3D11_SUBRESOURCE_DATA indexData, vertexData;
//create buffers
_vertexCount = 3;
_indexCount = 3;
vertexBuffer = new vertex[_vertexCount];
if(!vertexBuffer)return false;
indexBuffer = new unsigned long[_indexCount];
if(!indexBuffer)return false;
//fill buffers
vertexBuffer[0] = vertex( 0.0f, 1.0f, 1.0f);
vertexBuffer[0] = vertex( 1.0f, -1.0f, 1.0f);
vertexBuffer[0] = vertex( -1.0f, -1.0f, 1.0f);
indexBuffer[0] = 0;
indexBuffer[1] = 1;
indexBuffer[2] = 2;
//bufferDesc
vertexDesc.Usage = D3D11_USAGE_DEFAULT;
vertexDesc.ByteWidth = sizeof(vertex) * _vertexCount;
vertexDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexDesc.CPUAccessFlags = 0;
vertexDesc.MiscFlags = 0;
vertexDesc.StructureByteStride = 0;
//set subressource data
vertexData.pSysMem = vertexBuffer;
vertexData.SysMemPitch = 0;
vertexData.SysMemSlicePitch = 0;
result = _S3DData->Device->CreateBuffer(&vertexDesc, &vertexData, &_vertex);
if(FAILED(result))return false;
indexDesc.ByteWidth = sizeof(unsigned long) * _indexCount;
indexDesc.Usage = D3D11_USAGE_DEFAULT;
indexDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexDesc.MiscFlags = 0;
indexDesc.CPUAccessFlags = 0;
indexDesc.StructureByteStride = 0;
//set subressource
indexData.pSysMem = indexBuffer;
indexData.SysMemPitch = 0;
indexData.SysMemSlicePitch = 0;
result = _S3DData->Device->CreateBuffer(&indexDesc, &indexData, &_index);
if(FAILED(result))return false;
delete []indexBuffer;
indexBuffer = nullptr;
delete []vertexBuffer;
vertexBuffer = nullptr;
return true;
}
The vertex struct:
struct vertex{
XMFLOAT3 pos;
vertex(){}
vertex(float x, float y, float z):pos(x, y, z){
}
};
So this function only creates the buffers. In the render function the remaining variables are set:
void model::render(shader *Shader){
unsigned int stride = sizeof(vertex);
unsigned int offset = 0;
_S3DData->DeviceContext->IASetVertexBuffers(0, 1, &_vertex, &stride, &offset);
_S3DData->DeviceContext->IASetIndexBuffer(_index, DXGI_FORMAT_R32_UINT, 0);
//set form of vertex: triangles
_S3DData->DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
_S3DData->DeviceContext->DrawIndexed(_indexCount, 0, 0);
}
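For context, the frame that wraps this call would typically clear the render target, draw, and present; a rough sketch built from the S3DData members used above (function and variable names are placeholders):
void renderFrame(S3DData *data, model *triangle, shader *colorShader){
    const float clearColor[4] = { 0.0f, 0.2f, 0.4f, 1.0f };        // placeholder background color
    data->DeviceContext->ClearRenderTargetView(data->RenderTargetView, clearColor);
    triangle->render(colorShader);                                  // binds the buffers and issues DrawIndexed
    data->SwapChain->Present(0, 0);                                 // 0, 0 = present immediately, no flags
}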
EDIT:
Here is the shader code you requested.
Vertex Shader:
struct VSout{
float4 position :SV_POSITION;
};
VSout VS(float4 position:POSITION){
VSout output;
output.position = position;
return output;
}
Pixel Shader:
float4 PS() :SV_TARGET{
float4 newColor = float4(1.0f, 1.0f, 0.0f, 1.0f);
return newColor;
}
Here is a screenshot of the debugger: on the left you have all the draw calls etc., and in the middle you can see the vertex buffer.
(debugger screenshot)
Thanks for your help in advance.
Looking at the debugger image you posted, the 2nd and 3rd vertices are all 0. This means you didn't fill your vertex buffer properly.
Looking at your code, when you fill your vertex buffer, you're only setting index 0. So your code looks like this:
vertexBuffer[0] = vertex( 0.0f, 1.0f, 1.0f);
vertexBuffer[0] = vertex( 1.0f, -1.0f, 1.0f);
vertexBuffer[0] = vertex( -1.0f, -1.0f, 1.0f);
And it should look like this:
vertexBuffer[0] = vertex( 0.0f, 1.0f, 1.0f);
vertexBuffer[1] = vertex( 1.0f, -1.0f, 1.0f);
vertexBuffer[2] = vertex( -1.0f, -1.0f, 1.0f);
I am trying to make a simple D3D11 program to rotate a cube, but ultimately the cube doesn't seem to appear.
The screen is being cleared to blue, but the cube does not show up.
I have been using this as my source: https://learn.microsoft.com/en-us/windows/win32/direct3dgetstarted/getting-started-with-a-directx-game
The structure of my project is:
MainClass.cpp, MainClass.h (handles window initialization)
DeviceResources.cpp, DeviceResources.h (the device resources include the device, context, etc.)
Renderer.cpp, Renderer.h (the renderer loads geometry and shaders; most probably this is where I am going wrong)
Here is my Renderer.h :
#pragma once
#include <memory>
#include <Windows.h>
#include <DirectXMath.h>
#include "DeviceResources.h"
class Renderer
{
public:
Renderer(std::shared_ptr<DeviceResources> deviceResources);
~Renderer();
void CreateDeviceDependentResources();
void CreateWindowSizeDependentResources();
void Update();
void Render();
private:
HRESULT CreateShaders();
HRESULT CreateCube();
void CreateViewAndPerspective();
bool m_done = false;
std::shared_ptr<DeviceResources> m_deviceResources;
typedef struct _constantBufferStruct {
DirectX::XMFLOAT4X4 world;
DirectX::XMFLOAT4X4 view;
DirectX::XMFLOAT4X4 projection;
} ConstantBufferStruct;
static_assert((sizeof(ConstantBufferStruct) % 16) == 0, "Constant Buffer size must be 16-byte aligned");
typedef struct _vertexPositionColor {
DirectX::XMFLOAT3 pos;
DirectX::XMFLOAT3 color;
} VertexPositionColor;
typedef struct _vertexPositionColorTangent {
DirectX::XMFLOAT3 pos;
DirectX::XMFLOAT3 color;
DirectX::XMFLOAT3 tangent;
} VertexPositionColorTangent;
ConstantBufferStruct m_constantBufferData;
unsigned int m_indexCount;
unsigned int m_frameCount;
Microsoft::WRL::ComPtr<ID3D11Buffer> m_pVertexBuffer;
Microsoft::WRL::ComPtr<ID3D11Buffer> m_pIndexBuffer;
Microsoft::WRL::ComPtr<ID3D11VertexShader> m_pVertexShader;
Microsoft::WRL::ComPtr<ID3D11PixelShader> m_pPixelShader;
Microsoft::WRL::ComPtr<ID3D11InputLayout> m_pInputLayout;
Microsoft::WRL::ComPtr<ID3D11InputLayout> m_pInputLayoutExtended;
Microsoft::WRL::ComPtr<ID3D11Buffer> m_pConstantBuffer;
};
And here is my Renderer.cpp code:
#include "Renderer.h"
#include <fstream>
#include <ppltasks.h>
#include <stdexcept>
Renderer::Renderer(std::shared_ptr<DeviceResources> deviceResources) :
m_deviceResources(deviceResources),
m_frameCount(0)
{
m_frameCount = 0;
}
Renderer::~Renderer() {}
void Renderer::CreateDeviceDependentResources() {
auto CreateShaderTask = Concurrency::create_task([this]() {CreateShaders();
m_done = true;
});
auto CreateCubeTask = CreateShaderTask.then([this]() {CreateCube(); });
}
void Renderer::CreateWindowSizeDependentResources() {
CreateViewAndPerspective();
}
HRESULT Renderer::CreateShaders() {
HRESULT hr = S_OK;
ID3D11Device* device = m_deviceResources->GetDevice();
FILE* vShader, * pShader;
BYTE* bytes;
size_t destSize = 4096;
size_t bytesRead = 0;
bytes = new BYTE[destSize];
fopen_s(&vShader, "cubeVertexShader.cso", "rb");
bytesRead = fread_s(bytes, destSize, 1, 4096, vShader);
hr = device->CreateVertexShader(
bytes,
bytesRead,
nullptr,
&m_pVertexShader
);
D3D11_INPUT_ELEMENT_DESC iaDesc[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT,
0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT,
0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
hr = device->CreateInputLayout(iaDesc,ARRAYSIZE(iaDesc), bytes, bytesRead, &m_pInputLayout);
delete bytes;
bytes = new BYTE[destSize];
bytesRead = 0;
fopen_s(&pShader, "cubePixelShader.cso", "rb");
bytesRead = fread_s(bytes, destSize, 1, 4096, pShader);
hr = device->CreatePixelShader(bytes, bytesRead, nullptr, &m_pPixelShader);
CD3D11_BUFFER_DESC cbDesc(sizeof(ConstantBufferStruct), D3D11_BIND_CONSTANT_BUFFER );
hr = device->CreateBuffer(&cbDesc, nullptr, &m_pConstantBuffer);
if (FAILED(hr))
throw std::exception("Failed to Create Constant Buffer");
fclose(vShader);
fclose(pShader);
return hr;
}
HRESULT Renderer::CreateCube() {
HRESULT hr = S_OK;
ID3D11Device* device = m_deviceResources->GetDevice();
VertexPositionColor CubeVertices[] =
{
{DirectX::XMFLOAT3(-0.5f,-0.5f,-0.5f), DirectX::XMFLOAT3(0, 0, 0),},
{DirectX::XMFLOAT3(-0.5f,-0.5f, 0.5f), DirectX::XMFLOAT3(0, 0, 1),},
{DirectX::XMFLOAT3(-0.5f, 0.5f,-0.5f), DirectX::XMFLOAT3(0, 1, 0),},
{DirectX::XMFLOAT3(-0.5f, 0.5f, 0.5f), DirectX::XMFLOAT3(0, 1, 1),},
{DirectX::XMFLOAT3(0.5f,-0.5f,-0.5f), DirectX::XMFLOAT3(1, 0, 0),},
{DirectX::XMFLOAT3(0.5f,-0.5f, 0.5f), DirectX::XMFLOAT3(1, 0, 1),},
{DirectX::XMFLOAT3(0.5f, 0.5f,-0.5f), DirectX::XMFLOAT3(1, 1, 0),},
{DirectX::XMFLOAT3(0.5f, 0.5f, 0.5f), DirectX::XMFLOAT3(1, 1, 1),},
};
unsigned short CubeIndices[] =
{
0,2,1, // -x
1,2,3,
4,5,6, // +x
5,7,6,
0,1,5, // -y
0,5,4,
2,6,7, // +y
2,7,3,
0,4,6, // -z
0,6,2,
1,3,7, // +z
1,7,5,
};
m_indexCount = ARRAYSIZE(CubeIndices);
CD3D11_BUFFER_DESC vbDesc(sizeof(CubeVertices), D3D11_BIND_VERTEX_BUFFER);
D3D11_SUBRESOURCE_DATA vData;
ZeroMemory(&vData, sizeof(D3D11_SUBRESOURCE_DATA));
vData.pSysMem = CubeVertices;
vData.SysMemPitch = 0;
vData.SysMemSlicePitch = 0;
hr = device->CreateBuffer(&vbDesc, &vData, &m_pVertexBuffer);
CD3D11_BUFFER_DESC ibDesc(sizeof(CubeIndices), D3D11_BIND_INDEX_BUFFER);
D3D11_SUBRESOURCE_DATA iData;
ZeroMemory(&iData, sizeof(D3D11_SUBRESOURCE_DATA));
iData.pSysMem = CubeIndices;
iData.SysMemPitch = 0;
iData.SysMemSlicePitch = 0;
hr = device->CreateBuffer(&ibDesc, &iData, &m_pIndexBuffer);
return hr;
}
void Renderer::CreateViewAndPerspective() {
// Use DirectXMath to create view and perspective matrices.
DirectX::XMVECTOR eye = DirectX::XMVectorSet(0.0f, 0.7f, 1.5f, 0.f);
DirectX::XMVECTOR at = DirectX::XMVectorSet(0.0f, -0.1f, 0.0f, 0.f);
DirectX::XMVECTOR up = DirectX::XMVectorSet(0.0f, 1.0f, 0.0f, 0.f);
DirectX::XMStoreFloat4x4(
&m_constantBufferData.view,
DirectX::XMMatrixTranspose(
DirectX::XMMatrixLookAtRH(
eye,
at,
up
)
)
);
float aspectRatio = m_deviceResources->GetAspectRatio();
DirectX::XMStoreFloat4x4(
&m_constantBufferData.projection,
DirectX::XMMatrixTranspose(
DirectX::XMMatrixPerspectiveFovRH(
DirectX::XMConvertToRadians(70),
aspectRatio,
0.01f,
100.0f
)
)
);
}
void Renderer::Update() {
DirectX::XMStoreFloat4x4(
&m_constantBufferData.world,
DirectX::XMMatrixTranspose(
DirectX::XMMatrixRotationY(
DirectX::XMConvertToRadians(
(float)m_frameCount++
)
)
)
);
if (m_frameCount == MAXUINT) m_frameCount = 0;
}
void Renderer::Render() {
while (!m_done) {
}
ID3D11DeviceContext* context = m_deviceResources->GetDeviceContext();
ID3D11RenderTargetView* renderTarget = m_deviceResources->GetRenderTarget();
ID3D11DepthStencilView* depthStencil = m_deviceResources->GetDepthStencil();
context->UpdateSubresource(m_pConstantBuffer.Get(), 0, nullptr, &m_constantBufferData, 0, 0);
const float blue[4] = { 0.2f, 0.3f, 0.8f, 1.0f};
context->ClearRenderTargetView(renderTarget, blue);
context->ClearDepthStencilView(depthStencil, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
context->OMSetRenderTargets(1, &renderTarget, depthStencil);
UINT stride = sizeof(VertexPositionColor);
UINT offset = 0;
context->IASetVertexBuffers(
0,
1,
m_pVertexBuffer.GetAddressOf(),
&stride,
&offset
);
context->IASetIndexBuffer(m_pIndexBuffer.Get(), DXGI_FORMAT_R16_UINT, 0);
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
context->IASetInputLayout(m_pInputLayout.Get());
context->VSSetShader(m_pVertexShader.Get(), nullptr, 0);
context->VSSetConstantBuffers(0, 1, m_pConstantBuffer.GetAddressOf());
context->PSSetShader(m_pPixelShader.Get(), nullptr, 0);
//context->DrawIndexed(m_indexCount, 0, 0);
context->Draw(3, 0);
}
For more reference, here are the shaders:
Vertex Shader
cbuffer ModelViewProjectionBuffer : register(b0)
{
matrix model;
matrix view;
matrix projection;
};
float4 main(float3 Position:POSITION, float3 Color:COLOR) : SV_POSITION// main is the default function name
{
float4 pos = float4(Position, 1.0f);
// Transform the position from object space to homogeneous projection space
pos = mul(pos, model);
pos = mul(pos, view);
pos = mul(pos, projection);
return pos;
}
Pixel Shader:
float4 main(float4 position:SV_POSITION) : SV_TARGET
{
return float4(0.9f, 0.4f, 0.2f, 1.0f);
}
Image of the output
Please help me analyze where I am going wrong.
Moreover, I am using Visual Studio 2019.
Edit: Some were asking for a complete reproduction, so here is the Visual Studio solution:
https://drive.google.com/file/d/1jt6fQgbRElpc9AYpbhYOyp-HCQL3WmEF/view?usp=sharing
You're doing invalid memory accesses when reading the vertex and pixel shaders.
Also, on my computer, both compiled shaders are a dozen kilobytes or so, whilst the memory buffer you're reading them into has only 4096 bytes.
To fix your problem, increase the size of the memory buffer you're using to read the shader bytecode from disk, and don't forget to update the element count in fread_s().
e.g.,
Renderer.cpp # 35
size_t destSize = 1024 * 1024; // CHANGE THE SIZE OF YOUR MEMORY BUFFER HERE
size_t bytesRead = 0;
bytes = new BYTE[destSize];
fopen_s(&vShader, "cubeVertexShader.cso", "rb");
bytesRead = fread_s(bytes, destSize * sizeof(BYTE), sizeof(BYTE), destSize, vShader);
hr = device->CreateVertexShader(
bytes,
bytesRead,
nullptr,
&m_pVertexShader
);
Do the same where you're reading the pixel shader.
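If you'd rather not guess at a size, you could also size the buffer from the file itself; a rough sketch along the lines of the question's code (error checks omitted, file name and member names taken from the question):
FILE* vShader = nullptr;
fopen_s(&vShader, "cubeVertexShader.cso", "rb");
fseek(vShader, 0, SEEK_END);
long fileSize = ftell(vShader);                     // compiled shader size in bytes
fseek(vShader, 0, SEEK_SET);
BYTE* bytes = new BYTE[fileSize];
size_t bytesRead = fread_s(bytes, fileSize, 1, fileSize, vShader);
fclose(vShader);
hr = device->CreateVertexShader(bytes, bytesRead, nullptr, &m_pVertexShader);
delete[] bytes;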
PRO TIP: Pay attention to the debug output and you'll see important messages from the D3D debug layer, like the one telling you that both shaders failed to be created :)
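In case it helps, the debug layer only reports anything if the device was created with the debug flag; a minimal sketch of passing it to D3D11CreateDeviceAndSwapChain (variable names here are placeholders, not from the question):
UINT creationFlags = 0;
#if defined(_DEBUG)
creationFlags |= D3D11_CREATE_DEVICE_DEBUG;      // turns on the D3D11 debug layer output
#endif
HRESULT hr = D3D11CreateDeviceAndSwapChain(
    nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, creationFlags,
    nullptr, 0, D3D11_SDK_VERSION,
    &swapChainDesc, &swapChain, &device, nullptr, &context);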
I'm learning DirectX 11 and I'm trying to draw 2 quads with a single function, but when I call this function, it overrides the previous quad.
If you want to see the problem, here is a gif: https://i.gyazo.com/b819ffc64975c1531434047b9b4a92f7.mp4
Here is the code (this is where I call the function to draw the quads):
void Application::ApplicationRun()
{
//wind.SetEventCallBack(std::bind(&Application::OnEvent, Instance, std::placeholders::_1));
Vertex v[] =
{
Vertex(-0.5f, -0.5f, 0.0f, 1.0f), /// bottom Left Point - [0]
Vertex(-0.5f, 0.5f, 0.0f, 0.0f), //top Left Point - [1]
Vertex{ 0.0f, -0.5f, 1.0f, 1.0f}, // -[2]
Vertex(0.0f, 0.5f, 1.0f, 0.0f), //Right Point - [3]
};
Vertex vv[] =
{
Vertex(-0.1f, -0.1f, 0.0f, 0.0f), /// bottom Left Point - [0]
Vertex(-0.1f, 0.1f, 0.0f, 0.0f), //top Left Point - [1]
Vertex{ 0.05f, -0.1f, 0.0f, 0.0f}, // -[2]
Vertex(0.05f, 0.1f, 0.0f, 0.0f), //Right Point - [3]
};
DWORD indices[] =
{
0,1,2,
1,2,3,
};
while (wind.PorcessMessage())
{
//
if (Armageddon::Application::GetInstance()->GetWindow()->GetNativeKeyBoard().KeyIsPressed(AG_KEY_A))
{
Log::GetLogger()->trace("A ");
wind.GetWindowGraphics()->DrawTriangle(v, ARRAYSIZE(v));
}
if (Armageddon::Application::GetInstance()->GetWindow()->GetNativeKeyBoard().KeyIsPressed(AG_KEY_B))
{
Log::GetLogger()->trace("B");
wind.GetWindowGraphics()->DrawTriangle(vv, ARRAYSIZE(vv));
}
}
}
(Here is the DrawTriangle function):
void Armageddon::D3D_graphics::DrawTriangle(Vertex v[], int Vertexcount)
{
HRESULT hr = vertexBuffer.Initialize(this->device.Get(),this->device_context.Get() , v, Vertexcount);
if (FAILED(hr))
{
Armageddon::Log::GetLogger()->error("FAILED INITIALIZE VERTEX BUFFER ");
}
DWORD indices[] =
{
0,1,2,
1,2,3,
};
hr = this->indicesBuffer.Init(this->device.Get(), indices, ARRAYSIZE(indices));
hr = DirectX::CreateWICTextureFromFile(this->device.Get(), L"..\\TestApplication\\assets\\Textures\\tex.png",nullptr,textures.GetAddressOf());
if (FAILED(hr))
{
Armageddon::Log::GetLogger()->error("FAILED INITIALIZE WIC TEXTURE ");
}
}
(Here is where I initialize the vertex and the index buffers):
HRESULT Initialize(ID3D11Device* device , ID3D11DeviceContext* device_context, T* data, UINT numElements)
{
this->bufferSize = numElements;
this->stride = sizeof(T);
D3D11_BUFFER_DESC vertex_buffer_desc;
ZeroMemory(&vertex_buffer_desc, sizeof(vertex_buffer_desc));
vertex_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buffer_desc.ByteWidth = sizeof(Vertex) * numElements;
vertex_buffer_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buffer_desc.CPUAccessFlags = 0;
vertex_buffer_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA VertexBufferData;
ZeroMemory(&VertexBufferData, sizeof(VertexBufferData));
VertexBufferData.pSysMem = data;
HRESULT hr = device->CreateBuffer(&vertex_buffer_desc, &VertexBufferData, buffer.GetAddressOf());
UINT offset = 0;
device_context->IASetVertexBuffers(0, 1, buffer.GetAddressOf(), &stride, &offset);
return hr;
};
HRESULT Init(ID3D11Device* device, DWORD* data, UINT n_indices)
{
D3D11_SUBRESOURCE_DATA Indice_buffer_data;
this->buffer_size = n_indices;
D3D11_BUFFER_DESC Indice_buffer_desc;
ZeroMemory(&Indice_buffer_desc, sizeof(Indice_buffer_desc));
Indice_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
Indice_buffer_desc.ByteWidth = sizeof(DWORD) * n_indices;
Indice_buffer_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
Indice_buffer_desc.CPUAccessFlags = 0;
Indice_buffer_desc.MiscFlags = 0;
ZeroMemory(&Indice_buffer_data, sizeof(Indice_buffer_data));
Indice_buffer_data.pSysMem = data;
HRESULT hr = device->CreateBuffer(&Indice_buffer_desc, &Indice_buffer_data, buffer.GetAddressOf());
return hr;
};
And here is where I draw the quads:
void Armageddon::D3D_graphics::RenderFrame()
{
float color[] = { 0.1f,0.1f,0.1f,1.0f };
ImGui_ImplDX11_NewFrame();
ImGui_ImplWin32_NewFrame();
ImGui::NewFrame();
if (show_demo_window)
ImGui::ShowDemoWindow(&show_demo_window);
// 2. Show a simple window that we create ourselves. We use a Begin/End pair to created a named window.
{
static float f = 0.0f;
static int counter = 0;
ImGui::Begin("Hello, world!"); // Create a window called "Hello, world!" and append into it.
ImGui::Text("This is some useful text."); // Display some text (you can use a format strings too)
ImGui::Checkbox("Demo Window", &show_demo_window); // Edit bools storing our window open/close state
ImGui::Checkbox("Another Window", &show_another_window);
ImGui::SliderFloat("float", &f, 0.0f, 1.0f); // Edit 1 float using a slider from 0.0f to 1.0f
ImGui::ColorEdit3("clear color", (float*)&clear_color); // Edit 3 floats representing a color
if (ImGui::Button("Button")) // Buttons return true when clicked (most widgets return true when edited/activated)
counter++;
ImGui::SameLine();
ImGui::Text("counter = %d", counter);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGui::End();
}
// 3. Show another simple window.
if (show_another_window)
{
ImGui::Begin("Another Window", &show_another_window); // Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked)
ImGui::Text("Hello from another window!");
if (ImGui::Button("Close Me"))
show_another_window = false;
ImGui::End();
}
ImGui::Render();
this->device_context->OMSetRenderTargets(1, target_view.GetAddressOf(), this->depthStencilView.Get());
this->device_context->ClearRenderTargetView(this->target_view.Get(), color);
this->device_context->ClearDepthStencilView(this->depthStencilView.Get(), D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
//ALL THE RENDERING IS DONE HERE AFTER CLEARING THE RENDER TARGET
this->device_context->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY::D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
f += 0.1;
ConstantBuffer.data.mat = DirectX::XMMatrixRotationRollPitchYaw(0.0f,0.0f,f);
ConstantBuffer.data.mat = DirectX::XMMatrixTranspose(ConstantBuffer.data.mat);
ConstantBuffer.data.Yoffset = f;
ConstantBuffer.data.Xoffset = 0;
// Armageddon::Log::GetLogger()->trace(ConstantBuffer.data.Yoffset);
if (!ConstantBuffer.ApplyChanges())
{
Armageddon::Log::GetLogger()->error("ERRO WHEN APPLYING CHANGES");
}
this->device_context->VSSetConstantBuffers(0, 1, ConstantBuffer.GetAdressOf());
/***********SHADER*******************************/
this->device_context->VSSetShader(vertexShader.GetShader(), NULL, 0);
this->device_context->PSSetShader(pixelShader.GetShader(), NULL, 0);
this->device_context->IASetInputLayout(this->vertexShader.GetInputLayout());
/***********Texture Sampler*******************************/
this->device_context->PSSetSamplers(0, 1, this->ColorSampler.GetAddressOf());
/***********DEPHT BUFFER*******************************/
this->device_context->OMSetDepthStencilState(this->depthStencilState.Get(), 0);
/***********RASTERIZER STATE*******************************/
this->device_context->RSSetState(this->rasterizerState.Get());
/***********UPDATE THE CONSTANT BUFFERS*******************************/
UINT stride = sizeof(Vertex);
UINT offset = 0;
this->device_context->IASetIndexBuffer(indicesBuffer.Get(), DXGI_FORMAT_R32_UINT, 0);
//this->device_context->IASetVertexBuffers(0, 1, vertexBuffer.GetAddressOf(), &stride, &offset);
this->device_context->PSSetShaderResources(0, 1, this->textures.GetAddressOf());
this->device_context->DrawIndexed(indicesBuffer.GetSize(), 0,0);
ImGui_ImplDX11_RenderDrawData(ImGui::GetDrawData());
this->swapchain->Present(1,0);
}
I am trying to render a textured square, but it looks like the texture doesn't get interpolated like it should. It looks like it gets mirrored on each triangle of my square. The behavior is shown in the image below.
Note that I'm using this tutorial.
I don't have an idea where to start fixing my code. Also, when I try to translate my Image layout, I get this error: "Cannot submit cmd buffer using image 0x25 with layout VK_IMAGE_LAYOUT_UNDEFINED when first use is VK_IMAGE_LAYOUT_TRANSFER_DST_BIT."
Also, I get this warning when submitting a drawing command buffer for the first time: "Cannot submit cmd buffer using image 0x25 with layout VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL when first use is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL."
Can this warning cause my issue? Also, here are my vertices and their texture coordinates.
vertices->setVertices({
{{-1.0f, -1.0f}, {1.0f, 0.0f, 0.0f}, {1.0f, 0.0f}},
{{1.0f, -1.0f}, {0.0f, 1.0f, 0.0f}, {0.0f, 0.0f}},
{{1.0f, 1.0f}, {0.0f, 0.0f, 1.0f}, {0.0f, 1.0f}},
{{-1.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {1.0f, 1.0f}}
});
vertices->setIndices({ 0, 1, 2, 2, 3, 0 });
Update:
Here is my image transition code:
void Util::transitionImageLayout(VkImage *image, VkFormat format,
VkImageLayout oldLayout, VkImageLayout newLayout,
VkCommandBuffer recordingBuffer) {
VkImageMemoryBarrier barrier = {};
VkImageSubresourceRange subresourceRange = {};
VkPipelineStageFlags sourceStage = {};
VkPipelineStageFlags dstStage = {};
if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
dstStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
}
else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
else {
throw std::invalid_argument("Layout transition not supported.");
}
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseArrayLayer = 0;
subresourceRange.baseMipLevel = 0;
subresourceRange.layerCount = 1;
subresourceRange.levelCount = 1;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.image = *image;
barrier.oldLayout = oldLayout;
barrier.newLayout = newLayout;
barrier.subresourceRange = subresourceRange;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(recordingBuffer, sourceStage, dstStage, 0, 0, nullptr, 0, nullptr, 0, &barrier);
}
Here is my copy buffer to image code:
void Util::copyBufferToimage(VkCommandBuffer cmdBuffer, VkBuffer buffer,
VkImage *image, uint32_t width, uint32_t height) {
VkBufferImageCopy region{};
VkImageSubresourceLayers subresouce{};
subresouce.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresouce.baseArrayLayer = 0;
subresouce.mipLevel = 0;
subresouce.layerCount = 1;
region.bufferImageHeight = 0;
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.imageOffset = { 0, 0, 0 };
region.imageExtent = { width, height, 1 };
region.imageSubresource = subresouce;
vkCmdCopyBufferToImage(cmdBuffer, buffer, *image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
}
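For reference, these two helpers are normally recorded in this order, so the image is in TRANSFER_DST_OPTIMAL for the copy and in SHADER_READ_ONLY_OPTIMAL before it is sampled; a rough usage sketch with the functions above (recordingBuffer, stagingBuffer, image, format, width, and height are assumed to already exist, with the command buffer in the recording state):
Util::transitionImageLayout(&image, format,
    VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, recordingBuffer);
Util::copyBufferToimage(recordingBuffer, stagingBuffer, &image, width, height);
Util::transitionImageLayout(&image, format,
    VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, recordingBuffer);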
Notes:
I have tried running the project from the tutorial's git repo, and it works fine, with no warnings.
The problem was in my vertex struct code, which I didn't provide. The format of the texture coordinates was wrong: it should be R32G32, and I had set it to R32G32B32A32.
static std::array<VkVertexInputAttributeDescription, 3> getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, position);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
attributeDescriptions[2].binding = 0;
attributeDescriptions[2].location = 2;
attributeDescriptions[2].format = VK_FORMAT_R32G32B32A32_SFLOAT; //This is an error. I guess this has to do something with texCoord being a glm::vec2
attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
return attributeDescriptions;
}
So, the attribute description format for the texture coordinates should be
attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
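For context, the attribute formats just have to match the C++ vertex layout; a sketch of what the Vertex struct presumably looks like (following the tutorial, since the struct itself isn't shown in the question):
#include <glm/glm.hpp>
struct Vertex {
    glm::vec2 position;   // two floats   -> VK_FORMAT_R32G32_SFLOAT    (location 0)
    glm::vec3 color;      // three floats -> VK_FORMAT_R32G32B32_SFLOAT (location 1)
    glm::vec2 texCoord;   // two floats   -> VK_FORMAT_R32G32_SFLOAT    (location 2)
};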
Now my polygon looks like this:
I'm developing a DirectX 11 app and, for some reason, my simple application crashes with an access violation error. Something causes it to crash, and then my graphics driver resets.
Here's my initialization:
DirectXRendererImpl::DirectXRendererImpl(const EngineSettings& settings, Logger& logger) : mLogger(logger), mWindowHandle(GetActiveWindow()), mSwapchain(nullptr), mBackbuffer(nullptr), mDevice(nullptr), mContext(nullptr),
mForwardVertexShader(nullptr), mForwardPixelShader(nullptr), mVertexBuffer(nullptr), mInputLayout(nullptr)
{
// create swapchain, device and devicecontext
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferCount = 2;
swapChainDesc.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.OutputWindow = mWindowHandle;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.Windowed = true;
const D3D_FEATURE_LEVEL featureLevel = D3D_FEATURE_LEVEL_11_0;
const UINT numFeatureLevels = 1;
HRESULT result = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, &featureLevel, numFeatureLevels, D3D11_SDK_VERSION, &swapChainDesc, &mSwapchain, &mDevice, NULL, &mContext);
if (result != S_OK)
{
JONS_LOG_ERROR(mLogger, "DirectXRenderer::DirectXRenderer(): D3D11CreateDeviceAndSwapChain failed: code " + result);
throw std::runtime_error("DirectXRenderer::DirectXRenderer(): D3D11CreateDeviceAndSwapChain failed: code " + result);
}
// backbuffer rendertarget setup
ID3D11Texture2D* backbuffer = nullptr;
mSwapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&backbuffer);
mDevice->CreateRenderTargetView(backbuffer, NULL, &mBackbuffer);
backbuffer->Release();
mContext->OMSetRenderTargets(1, &mBackbuffer, NULL);
// setup viewport
// query width/height from d3d
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
mSwapchain->GetDesc(&swapChainDesc);
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = static_cast<float>(swapChainDesc.BufferDesc.Width);
viewport.Height = static_cast<float>(swapChainDesc.BufferDesc.Height);
viewport.MinDepth = 0.0f;
viewport.MaxDepth = 1.0f;
mContext->RSSetViewports(1, &viewport);
// create shader objects
mDevice->CreateVertexShader(gForwardVertexShader, sizeof(gForwardVertexShader), NULL, &mForwardVertexShader);
mDevice->CreatePixelShader(gForwardPixelShader, sizeof(gForwardPixelShader), NULL, &mForwardPixelShader);
mContext->VSSetShader(mForwardVertexShader, NULL, NULL);
mContext->PSSetShader(mForwardPixelShader, NULL, NULL);
// fill vertex buffer
VERTEX OurVertices[] =
{
{ 0.0f, 0.5f, 0.0f },
{ 0.45f, -0.5, 0.0f},
{ -0.45f, -0.5f, 0.0f }
};
D3D11_BUFFER_DESC bufferDescription;
ZeroMemory(&bufferDescription, sizeof(D3D11_BUFFER_DESC));
bufferDescription.Usage = D3D11_USAGE_DEFAULT;
bufferDescription.ByteWidth = sizeof(OurVertices);
bufferDescription.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bufferDescription.CPUAccessFlags = 0;
bufferDescription.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA initData;
ZeroMemory(&initData, sizeof(D3D11_SUBRESOURCE_DATA));
initData.pSysMem = OurVertices;
mDevice->CreateBuffer(&bufferDescription, &initData, &mVertexBuffer);
D3D11_INPUT_ELEMENT_DESC inputDescription;
ZeroMemory(&inputDescription, sizeof(D3D11_INPUT_ELEMENT_DESC));
inputDescription.SemanticName = "POSITION";
inputDescription.SemanticIndex = 0;
inputDescription.Format = DXGI_FORMAT_R32G32B32_FLOAT;
inputDescription.InputSlot = 0;
inputDescription.AlignedByteOffset = 0;
inputDescription.InputSlotClass = D3D11_INPUT_PER_VERTEX_DATA;
inputDescription.InstanceDataStepRate = 0;
mDevice->CreateInputLayout(&inputDescription, 1, gForwardVertexShader, sizeof(gForwardVertexShader), &mInputLayout);
mContext->IASetInputLayout(mInputLayout);
// register as window subclass to listen for WM_SIZE events. etc
if (!SetWindowSubclass(mWindowHandle, WndProc, gSubClassID, 0))
{
JONS_LOG_ERROR(mLogger, "DirectXRenderer::DirectXRenderer(): SetWindowSubclass() failed");
throw std::runtime_error("DirectXRenderer::DirectXRenderer(): SetWindowSubclass() failed");
}
gDirectXRendererImpl = this;
}
The simple vertex shader:
struct VOut
{
float4 position : SV_POSITION;
float4 color : COLOR;
};
VOut main(float4 position : POSITION)
{
VOut output;
output.position = position;
output.color = float4(1.0, 0.0, 0.0, 0.0);
return output;
}
Simple pixel shader:
float4 main(float4 position : POSITION, float4 color : COLOR) : SV_TARGET
{
return color;
}
Rendering function is simply this:
void DirectXRendererImpl::Render(const RenderQueue& renderQueue, const RenderableLighting& lighting, const DebugOptions::RenderingMode debugMode, const DebugOptions::RenderingFlags debugExtra)
{
const FLOAT clearColor[4] = { 1.0f, 1.0f, 0.0f, 1.0f };
mContext->ClearRenderTargetView(mBackbuffer, clearColor);
uint32_t vertexSize = sizeof(VERTEX);
uint32_t offset = 0;
mContext->IASetVertexBuffers(0, 1, &mVertexBuffer, &vertexSize, &offset);
mContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
mContext->Draw(3, 0);
mSwapchain->Present(0, 0);
}
The app runs for a few seconds and then causes a full video driver reset. Is my app leaking memory or what is causing this?
EDIT: more specifically, this is the error:
First-chance exception at 0x0F69974F (nvwgf2um.dll) in ExampleGame.exe: 0xC0000005: Access violation writing location 0x00193000.
Unhandled exception at 0x0F69974F (nvwgf2um.dll) in ExampleGame.exe: 0xC0000005: Access violation writing location 0x00193000.
You have to use this:
bufferDescription.ByteWidth = sizeof(VERTEX) * sizeof(OurVertices);
otherwise the program will crash.
I'm just starting out in DirectX 11, and I've run into a problem. I'm loading an extremely simple shader from an HLSL file:
struct VOut{
float4 position : SV_POSITION;
float4 color : COLOR;
};
VOut VS_Main( float4 pos : POSITION, float4 color : COLOR )
{
VOut output;
output.position = pos;
output.color = color;
return output;
}
float4 PS_Main( float4 pos : SV_POSITION, float4 color : COLOR ) : SV_TARGET
{
return color;
}
Using the following code (which does NOT trigger the if(FAILED(result)) statement):
bool PathfindingDemoBase::CompileD3DShader(char* filePath, char* entry, char* shaderModel, ID3DBlob** buffer){
DWORD shaderFlags = D3DCOMPILE_ENABLE_STRICTNESS;
#if defined(DEBUG) || defined(_DEBUG)
shaderFlags |= D3DCOMPILE_DEBUG;
#endif
ID3DBlob* errorBuffer = 0;
HRESULT result;
result = D3DX11CompileFromFile(filePath, 0, 0, entry, shaderModel,
0, 0, 0, buffer, &errorBuffer, 0);
if(FAILED(result)){
if(errorBuffer != 0){
OutputDebugStringA((char*)errorBuffer->GetBufferPointer());
errorBuffer->Release();
}
return false;
}
if(errorBuffer != 0){
errorBuffer->Release();
}
return true;
}
But what does happen is that, in the LoadContent function, the commented line (//<---) triggers an "E_INVALIDARG" result (in d3dResult), but the vsBuffer-> calls aren't returning null :/
bool AStarDemo::LoadContent(){
ID3D10Blob* vsBuffer = 0;
bool compileResult = CompileD3DShader("ReturnColor.hlsl", "VS_Main", "vs_5_0", &vsBuffer);
if(compileResult == false){
MessageBox(0, "Error loading vertex shader!", "Compile Error", MB_OK);
return false;
}
HRESULT d3dResult;
d3dResult = d3dDevice->CreateVertexShader(vsBuffer->GetBufferPointer(),
vsBuffer->GetBufferSize(), 0, &returnColorVS);
//actually set the vertex shader
d3dContext->VSSetShader(returnColorVS, 0, 0);
if(FAILED(d3dResult)){
if(vsBuffer){
vsBuffer->Release();
}
return false;
}
D3D11_INPUT_ELEMENT_DESC solidColorLayout[] ={
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
unsigned int totalLayoutElements = ARRAYSIZE(solidColorLayout);
d3dResult = d3dDevice->CreateInputLayout(solidColorLayout, totalLayoutElements,
vsBuffer->GetBufferPointer(), vsBuffer->GetBufferSize(), &inputLayout);
vsBuffer->Release();
if(FAILED(d3dResult)){
return false;
}
ID3D10Blob* psBuffer = 0;
compileResult = CompileD3DShader("ReturnColor.hlsl", "PS_Main", "ps_5_0", &psBuffer);
//actually set the pixel shader
d3dContext->PSSetShader(returnColorPS, 0, 0);
if(compileResult == false){
MessageBox(0, "Error loading pixel shader!", "Compile Error", MB_OK);
return false;
}
d3dResult = d3dDevice->CreatePixelShader(psBuffer->GetBufferPointer(),
psBuffer->GetBufferSize(), 0, &returnColorPS);
psBuffer->Release();
if(FAILED(d3dResult)){
return false;
}
Vertex vertices[4 * TOTAL_NUMBER_GRID_SQUARES];
gridSquares = new GridSquare[TOTAL_NUMBER_GRID_SQUARES];
//for each grid square
for(int i = 0; i < TOTAL_NUMBER_GRID_SQUARES; i++){
XMFLOAT4 color = ((i%2) == 0) ? GREEN : RED;
//create a grid square object
GridSquare gs;
gs.TopLeft = Vertex( XMFLOAT3(0.5f * i, 0.5f * i, Z_LEVEL),
color);
gs.TopRight = Vertex( XMFLOAT3( 0.5f * i, -0.5f * i, Z_LEVEL),
color);
gs.BottomRight = Vertex( XMFLOAT3(-0.5f * i, -0.5f * i, Z_LEVEL),
color);
gs.BottomLeft = Vertex( XMFLOAT3(-0.5f * i, 0.5f * i, Z_LEVEL),
color);
gs.type = ((i%2) == 0) ? prey : hunter;
gridSquares[i] = gs;
vertices[4*i] = gs.TopLeft;
vertices[(4*i)+1] = gs.TopRight;
vertices[(4*i)+2] = gs.BottomRight;
vertices[(4*i)+3] = gs.BottomLeft;
}
D3D11_BUFFER_DESC vertexDesc;
ZeroMemory(&vertexDesc, sizeof(vertexDesc));
vertexDesc.Usage = D3D11_USAGE_DYNAMIC;
vertexDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexDesc.ByteWidth = 4 * TOTAL_NUMBER_GRID_SQUARES * sizeof(Vertex);
vertexDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
D3D11_SUBRESOURCE_DATA resourceData;
ZeroMemory(&resourceData, sizeof(resourceData));
resourceData.pSysMem = vertices;
d3dResult = d3dDevice->CreateBuffer(&vertexDesc, &resourceData, &vertexBuffer);
if(FAILED(d3dResult)){
return false;
}
return true;
}
Any advice would be greatly appreciated! :)
Found the answer after digging around a bit: apparently there are some problems with using "ps_5_0" and "vs_5_0", even though my machine runs DirectX 11.
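One thing worth checking in that situation is the feature level the device actually came up with, since the 5_0 profiles need an 11_0 device; a minimal sketch, assuming d3dDevice is the device created earlier:
D3D_FEATURE_LEVEL level = d3dDevice->GetFeatureLevel();
// vs_5_0 / ps_5_0 require D3D_FEATURE_LEVEL_11_0; 10_x devices top out at the 4_x profiles.
const char* vsProfile = (level >= D3D_FEATURE_LEVEL_11_0) ? "vs_5_0" : "vs_4_0";
const char* psProfile = (level >= D3D_FEATURE_LEVEL_11_0) ? "ps_5_0" : "ps_4_0";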