Can't render a triangle with D3D12 - directx-12

I want to render a triangle with D3D12, but it doesn't function. I followed the Direct 3D Programming Guide by Microsoft.
I can clear the render target view and in the output I become a COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL warning (DrawInstanced: Vertex Buffer at the input vertex slot 0 is not big enough for what the Draw*() call expects to traverse...) every frame. So my thought is, the problem is at the vertices, coping the vertices.
Here is my code:
struct Vertex {
float x;
float y;
};
const Vertex vertices[] = {
{ 0.0f, 0.5f },
{ 0.5f, -0.5f },
{ -0.5f, -0.5f }
};
const UINT vertexBufferSize = sizeof(vertices);
// Create and load the vertex buffers
// I'm using an upload heap, because I can't find any solution about a default heap.
CD3DX12_HEAP_PROPERTIES vertexBufferHeapProperties(D3D12_HEAP_TYPE_UPLOAD/*D3D12_HEAP_TYPE_DEFAULT*/);
auto vertexBufferResourceDesc = CD3DX12_RESOURCE_DESC::Buffer(vertexBufferSize);
THROW_IF_FAILED(m_pDevice->CreateCommittedResource(
&vertexBufferHeapProperties,
D3D12_HEAP_FLAG_NONE,
&vertexBufferResourceDesc,
D3D12_RESOURCE_STATE_GENERIC_READ/*D3D12_RESOURCE_STATE_COPY_SOURCE*/,
nullptr,
IID_PPV_ARGS(&m_pVertexBuffer)
));
// Copy the vertex data to the vertex buffer
UINT8* pVertexDataBegin;
CD3DX12_RANGE readRange(0, 0);
THROW_IF_FAILED(m_pVertexBuffer->Map(0, &readRange, reinterpret_cast<void**>(&pVertexDataBegin)));
memcpy(pVertexDataBegin, vertices, sizeof(vertices));
m_pVertexBuffer->Unmap(0, nullptr);
// Create the vertex buffer views
m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
m_vertexBufferView.SizeInBytes = sizeof(Vertex);
m_vertexBufferView.StrideInBytes = vertexBufferSize;
In the input layout I'm using a DXGI_FORMAT_R32G32_FLOAT and D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA for position.
And in the PopulateCommandList function:
const float clearColor[] = { 0.6f, 0.7f, 0.9f, 1.0f };
m_pCommandList->ClearRenderTargetView(renderTargetViewHandle, clearColor, 0, 0);
m_pCommandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
m_pCommandList->IASetVertexBuffers(0, 1, &m_vertexBufferView);
m_pCommandList->DrawInstanced(3, 1, 0, 0);

It should be
m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
m_vertexBufferView.SizeInBytes = vertexBufferSize;
m_vertexBufferView.StrideInBytes = sizeof(Vertex);
instead of
m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
m_vertexBufferView.SizeInBytes = sizeof(Vertex);
m_vertexBufferView.StrideInBytes = vertexBufferSize;

Related

DirectX 12 doesn't draw triangle

I'm learning DirectX 12. I would like to draw a triangle, but DirectX 12 doesn't draw it. I wonder if there's something I forgot.
This is Init() part.
void Engine::Init() {
mDevice = new Device();
mCommandQueue = new CommandQueue();
mSwapChain = new SwapChain();
mDescriptorHeap = new DescriptorHeap();
mRootSignature = new RootSignature();
mDevice->Init();
mCommandQueue->Init(mDevice->GetDevice());
mSwapChain->Init(mDevice->GetDevice(), mDevice->GetFactory(), mCommandQueue->GetCommandQueue(), mWndInfo);
mDescriptorHeap->Init(mDevice->GetDevice(), mSwapChain);
mRootSignature->Init(mDevice->GetDevice());
mesh = new Mesh();
shader = new Shader(mRootSignature);
D3D12_INPUT_ELEMENT_DESC desc[] = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0}
};
shader->Init(mDevice->GetDevice(), L"..\\Resources\\Shader\\default.hlsli");
shader->SetPipelineState(mDevice->GetDevice(), desc, _countof(desc));
std::vector<Vertex> vec(3);
vec[0].pos = Vec3(0.0f, 0.5f, 0.5f);
vec[0].color = Vec4(1.0f, 0.0f, 0.0f, 1.0f);
vec[1].pos = Vec3(0.5f, -0.5f, 0.5f);
vec[1].color = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
vec[2].pos = Vec3(-0.5f, -0.5f, 0.5f);
vec[2].color = Vec4(0.0f, 0.0f, 1.0f, 1.0f);
mesh->Init(mDevice->GetDevice(), vec);
mCommandQueue->WaitForPreviousFrame(mSwapChain);
}
void Mesh::Init(ID3D12Device* const device, const std::vector<Vertex>& vec) {
mVertexCount = static_cast<uint32>(vec.size());
const uint32 bufferSize = mVertexCount * sizeof(Vertex);
D3D12_HEAP_PROPERTIES heapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD);
D3D12_RESOURCE_DESC resourceDesc = CD3DX12_RESOURCE_DESC::Buffer(bufferSize);
ThrowIfFailed(device->CreateCommittedResource(
&heapProperties,
D3D12_HEAP_FLAG_NONE,
&resourceDesc,
D3D12_RESOURCE_STATE_GENERIC_READ,
nullptr,
IID_PPV_ARGS(&mVertexBuffer)));
// Copy the triangle data to the vertex buffer.
void* pVertexDataBuffer = nullptr;
CD3DX12_RANGE readRange(0, 0); // We do not intend to read from this resource on the CPU.
ThrowIfFailed(mVertexBuffer->Map(0, &readRange, &pVertexDataBuffer));
std::memcpy(pVertexDataBuffer, &vec[0], bufferSize);
mVertexBuffer->Unmap(0, nullptr);
// Initialize the vertex buffer view.
mVertexBufferView.BufferLocation = mVertexBuffer->GetGPUVirtualAddress();
mVertexBufferView.StrideInBytes = sizeof(Vertex);
mVertexBufferView.SizeInBytes = bufferSize;
}
After Init, This code will be executed.
void Engine::Render() {
mCommandQueue->RenderBegin(mSwapChain, mDescriptorHeap, shader->GetGraphicsPSO(), &mViewport, &mScissorRect);
mesh->Render(mCommandQueue->GetCommandList());
mCommandQueue->RenderEnd(mSwapChain);
}
void CommandQueue::RenderBegin(SwapChain* swapChain, DescriptorHeap* descHeap, GraphicsPSO* graphicsPSO, const D3D12_VIEWPORT* viewPort, const D3D12_RECT* scissorRect) {
ThrowIfFailed(mCommandAllocator->Reset());
ThrowIfFailed(mCommandList->Reset(mCommandAllocator, graphicsPSO->GetPipelineState()));
mCommandList->SetGraphicsRootSignature(graphicsPSO->GetRootSignatrue()->GetRootSignature());
mCommandList->RSSetViewports(1, viewPort);
mCommandList->RSSetScissorRects(1, scissorRect);
// Indicate that the back buffer will be used as a render target.
D3D12_RESOURCE_BARRIER resourceBarrierDesc = CD3DX12_RESOURCE_BARRIER::Transition(swapChain->GetBackRtvBuffer(),
D3D12_RESOURCE_STATE_PRESENT,
D3D12_RESOURCE_STATE_RENDER_TARGET);
mCommandList->ResourceBarrier(1, &resourceBarrierDesc);
D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = descHeap->GetBackRtvHandle();
const float clearColor[] = { 0.0f, 0.2f, 0.4f, 1.0f };
mCommandList->OMSetRenderTargets(1, &rtvHandle, FALSE, nullptr);
mCommandList->ClearRenderTargetView(rtvHandle, clearColor, 0, nullptr);
}
void Mesh::Render(ID3D12GraphicsCommandList* cmdList) {
cmdList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
cmdList->IASetVertexBuffers(0, 1, &mVertexBufferView);
cmdList->DrawInstanced(mVertexCount, 1, 0, 0);
}
void CommandQueue::RenderEnd(SwapChain* swapChain) {
// Indicate that the back buffer will now be used to present.
D3D12_RESOURCE_BARRIER resourceBarrierDesc = CD3DX12_RESOURCE_BARRIER::Transition(swapChain->GetBackRtvBuffer(),
D3D12_RESOURCE_STATE_RENDER_TARGET,
D3D12_RESOURCE_STATE_PRESENT);
mCommandList->ResourceBarrier(1, &resourceBarrierDesc);
ThrowIfFailed(mCommandList->Close());
// Execute the command list.
ID3D12CommandList* ppCommandLists[] = { mCommandList };
mCommandQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists);
// Back buffer <-> Front buffer
swapChain->Present();
WaitForPreviousFrame(swapChain);
swapChain->SwapIndex();
}
Result is like this:
enter image description here
There's no error. I compared my code with DirectX sample code "Hello Triangle", but I wrote most of DirectX code to render such as OMSetRenderTargets, ResourceBarrier, etc.
Windows 10, GTX 750ti, support DirectX 12.
I cought the reason myself. It is because I got wrong with initialization of pipeline states.
ID3D12PipelineState* pipelineState = mGraphicsPSO->GetPipelineState();
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&pipelineState)));
this was my code where create graphics pipeline state. When I wrote this code, I had an error.
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mGraphicsPSO->GetPipelineState())));
I wanted to write like above. But it makes compile error, which is "C++ expression must be an lvalue or a function designator", because mGraphicsPSO->GetPipelineState()'s return is rvalue.
So, I thought that it might work if I wrote like the original code(the first code). UNFORTUNATELY it doesn't make compile error. It "works".
The problem of the first code is that the pipeline state pointer in class of GraphicsPSO is still nullptr because ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&pipelineState))); this code will create graphics pipeline state to "pipelineState" which is not a member of class of GraphicsPSO.
And I wrote this question to StackOverflow.
Anyway, I suspected the first code and wrote test code:
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mGraphicsPSO->mPipelineState)));
I moved mPipelineState from protected to public and wrote code like above. It works well.
enter image description here
This is result.
I found the fundamental reason of the error but it doesn't mean that it is finished. I need to think how to fix my first code.

OpenGL doesn't draw my Quad. Everything seems to be fine. No errors from OpenGL

I'm trying to make a system that allows you to type in a position and scale and it will create a vector that automatically generates all the vertices. The problem is when I try to draw my object it just won't show up. I have used OpenGL's built-in debugging system but it didn't say anything was wrong. So then I tried to manually debug myself but everything seemed to draw just fine.
Renderer::createQuad() method:
Shape Renderer::createQuad(glm::vec2 position, glm::vec2 scale, Shader shader, Texture texture)
{
float x = position.x;
float y = position.y;
float width = scale.x;
float height = scale.y;
std::vector<float> vertices =
{
x+width, y+height, 1.0f, 1.0f, // TR
x+width, y-height, 1.0f, 0.0f, // BR
x-width, y-height, 0.0f, 0.0f, // BL
x-width, y+height, 0.0f, 1.0f // TL
};
std::vector<uint32_t> indices =
{
0, 1, 3,
1, 2, 3
};
m_lenVertices = vertices.size();
m_lenIndices = indices.size();
// these Create methods should be fine as OpenGL does not give me any error
// also I have another function that requires you to pass in the vertex data and indices that works just fine
// I bind the thing I am creating
createVAO();
createVBO(vertices);
createEBO(indices);
createTexture(texture);
createShader(shader.getVertexShader(), shader.getFragmentShader());
Shape shape;
glm::mat4 model(1.0f);
glUniformMatrix4fv(glGetUniformLocation(m_shader, "model"), 1, GL_FALSE, glm::value_ptr(model));
shape.setShader(m_shader);
shape.setVAO(m_VAO);
shape.setTexture(m_texture);
shape.setPosition(position);
return shape;
}
Renderer::draw() method:
void Renderer::draw(Shape shape)
{
if (!m_usingIndices)
{
// Unbinds any other shapes
glBindVertexArray(0);
glUseProgram(0);
shape.bindShader();
shape.bindVAO();
shape.bindTexture();
glDrawArrays(GL_TRIANGLES, 0, m_lenVertices);
}
else
{
// Unbinds any other shapes
glBindVertexArray(0);
glUseProgram(0);
shape.bindShader();
shape.bindVAO();
shape.bindTexture();
glDrawElements(GL_TRIANGLES, m_lenIndices, GL_UNSIGNED_INT, 0);
}
}
Projection matrix:
glm::mat4 m_projectionMat = glm::ortho(-Window::getWidth(), Window::getWidth(), -Window::getHeight(), Window::getHeight, 0.1f, 100.0f);
Creating then rendering the Quad:
// Creates the VBO, VAO, EBO, etc.
quad = renderer.createQuad(glm::vec2(500.0f, 500.0f), glm::vec2(200.0F, 200.0f), LoadFile::loadShader("Res/Shader/VertShader.glsl", "Res/Shader/FragShader.glsl"), LoadFile::loadTexture("Res/Textures/Lake.jpg"));
// In the main game loop we render the quad
quad.setCamera(camera); // Sets the View and Projection matrix for the quad
renderer.draw(quad);
Output:
Output of the code before

Troubles with laying out memory for use by OpenGL (glBufferSubData help needed)

So today I wanted to learn how I can use a Vector2 & Color class to send data to OpenGL with pretty clean looking syntax.
The plan is this.
/*
Vector2 (8 bytes)
Color (16 bytes)
Vector2 + Color = 24 bytes
How can memory be laid out like this?
{Vector2, Color},
{Vector2, Color},
{Vector2, Color}
*/
So I have my two arrays of data.
Vector2 vertices[] = {
Vector2(0.0f, 0.5f),
Vector2(0.5f, -0.5f),
Vector2(-0.5f, -0.5f)
};
// colors would be mapped to there respective indexes in the vertices array (i.e colors[0] is mapped to vertices[0])
Color colors[] = {
Color(1.0f, 0.3f, 0.3f),
Color(0.3f, 1.0f, 0.3f),
Color(0.3f, 0.3f, 1.0f)
};
I was able to get a regular triangle rendering at the correct points,
but sending over the color data has been pretty difficult for me to pull off.
The result I currently get is this.
Here's a snippet of the code.
// positions on screen
Vector2 vertices[] = {
Vector2(0.0f, 0.5f),
Vector2(0.5f, -0.5f),
Vector2(-0.5f, -0.5f)
};
// colors for each position
Color colors[] = {
Color(1.0f, 0.3f, 0.3f),
Color(0.3f, 1.0f, 0.3f),
Color(0.3f, 0.3f, 1.0f)
};
// create vertex and array buffers (each bound automatically)
unsigned int vertexArray = createVertexArray();
unsigned int vertexBuffer = createBuffer(GL_ARRAY_BUFFER);
// allocate the data
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices) + sizeof(colors), nullptr, GL_STATIC_DRAW);
// fill up allocated memory
for (int i = 0; i < 3; ++i) {
glBufferSubData(GL_ARRAY_BUFFER, sizeof(Vector2) * i, sizeof(Vector2), &vertices[i]);
glBufferSubData(GL_ARRAY_BUFFER, (sizeof(Vector2) + sizeof(Color)) * (i + 1), sizeof(Color), &colors[i]);
}
// set up vertex attributes
glVertexAttribPointer(0, 2, GL_FLOAT, false, sizeof(Vector2) + sizeof(Color), nullptr);
glVertexAttribPointer(1, 3, GL_FLOAT, false, sizeof(Vector2) + sizeof(Color), (const void*)( sizeof(Vector2) ));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
You need to add 1 vertex and 1 color alternately to the buffer:
for (int i = 0; i < 3; ++i)
{
GLintptr offsetV = i * (sizeof(Vector2) + sizeof(Color));
glBufferSubData(GL_ARRAY_BUFFER, offsetV, sizeof(Vector2), &vertices[i]);
GLintptr offsetC = offsetV + sizeof(Vector2);
glBufferSubData(GL_ARRAY_BUFFER, offsetC, sizeof(Color), &colors[i]);
}

Setting Constant Buffer Directx

So, I'm trying to get my constant buffer in my shader to have a projection matrix that is orthogonal...
Can anyone tell me why I'm rendering nothing now that I tried to do this?
I assumed I needed to make two mapped subresources, and two buffers, one for vertex and the other for constant, maybe this is wrong?
Here is my code:
Vertex OurVertices[] =
{
{ D3DXVECTOR2(-0.5f,-0.5f), D3DXCOLOR(0.0f, 0.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(-0.5f,0.5f), D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(0.5f,0.5f), D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f) },
{ D3DXVECTOR2(-0.5f,-0.5f), D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(0.5f,0.5f), D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(0.5f,-0.5f), D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f) }
};
// create the vertex buffer
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
vertexBufferDesc.Usage = D3D11_USAGE_DYNAMIC; // write access access by CPU and GPU
vertexBufferDesc.ByteWidth = sizeof(Vertex) * 6; // size is the VERTEX struct * 3
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER; // use as a vertex buffer
vertexBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; // allow CPU to write in buffer
dev->CreateBuffer(&vertexBufferDesc, NULL, &pVBuffer); // create the buffer
D3D11_MAPPED_SUBRESOURCE ms_Vertex;
devcon->Map(pVBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms_Vertex);
memcpy(ms_Vertex.pData, OurVertices, sizeof(OurVertices)); // copy the data
devcon->Unmap(pVBuffer, NULL); // unmap the buffer
devcon->VSSetConstantBuffers(NULL, 1, &pVBuffer); // Finanly set the constant buffer in the vertex shader with the updated values.
MatrixBufferType* dataPtr;
D3D11_BUFFER_DESC constantBufferDesc; // create the constant buffer
ZeroMemory(&constantBufferDesc, sizeof(constantBufferDesc));
constantBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constantBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
constantBufferDesc.ByteWidth = sizeof(MatrixBufferType);
constantBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
constantBufferDesc.MiscFlags = 0;
constantBufferDesc.StructureByteStride = 0;
dev->CreateBuffer(&constantBufferDesc, NULL, &pCBuffer); // create the buffer
D3D11_MAPPED_SUBRESOURCE ms_CBuffer;
devcon->Map(pCBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms_CBuffer);
D3DXMatrixOrthoLH(&m_orthoMatrix, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 1);
D3DXMatrixTranspose(&m_orthoMatrix, &m_orthoMatrix);
dataPtr = (MatrixBufferType*)ms_CBuffer.pData;
dataPtr->projection = m_orthoMatrix;
memcpy(ms_CBuffer.pData, &dataPtr, sizeof(MatrixBufferType));
devcon->Unmap(pCBuffer, NULL);
devcon->VSSetConstantBuffers(NULL, 1, &pCBuffer); // Finally set the constant buffer in the vertex shader with the updated values.
So, attempting to put my projection matrix to use in my shader code:
cbuffer ConstantBuffer
{
matrix world;
matrix view;
matrix projection;
};
VOut VShader(float4 position : POSITION, float4 color : COLOR)
{
VOut output;
output.position = position;
output.position = mul(output.position, projection);
output.color = color;
return output;
}
Causes my quad which was originally rendering, to disappear.
This leads me to believe I'm doing something wrong, I just don't know what yet.

DirectX 11 LINELIST_TOPOLOGY uncompleted 2D rectangle at left-top coordinates

I'm doing something with DirectX 11 and came to a rectagle drawing (empty, non-colored), seemed simple for me at start (linelist_topology, 8 indices) but when I have it on the screen I see that my rectangle is kinda incomleted at left-top coordinate, there is a point of a background color there, the code is not complicated at all, vertices are 2D space:
SIMPLEVERTEX gvFrameVertices[4]=
{XMFLOAT3(0.0f,0.0f,1.0f),XMFLOAT2(0.0f, 0.0f),
XMFLOAT3(1.0f, 0.0f, 1.0f), XMFLOAT2(1.0f, 0.0f),
XMFLOAT3(1.0f, -1.0f, 1.0f), XMFLOAT2(1.0f, 1.0f),
XMFLOAT3(0.0f, -1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f)};
indices:
WORD gvRectangularIndices[8] = { 0, 1, 1, 2, 2, 3, 3, 0 };
Shader just returns given color in constant buffer:
float4 PS_PANEL(PS_INPUT input) : SV_Target
{
return fontcolor;
}
Function code itself:
VOID rectangle(INT _left, INT _top, INT _width, INT _height, XMFLOAT4 _color)
{
XMMATRIX scale;
XMMATRIX translate;
XMMATRIX world;
scale = XMMatrixScaling( _width, _height, 1.0f );
translate = XMMatrixTranslation(_left, gvHeight - _top, 1.0f);
world = scale * translate;
gvConstantBufferData.world = XMMatrixTranspose(world);
gvConstantBufferData.index = 1.0f;
gvConstantBufferData.color = _color;
gvContext->PSSetShader(gvPanelPixelshader, NULL, 0);
gvContext->UpdateSubresource(gvConstantBuffer, 0, NULL, &gvConstantBufferData, 0, 0 );
gvContext->IASetIndexBuffer(gvLinelistIndexBuffer, DXGI_FORMAT_R16_UINT, 0);
gvContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_LINELIST );
gvContext->DrawIndexed(8, 0, 0);
gvContext->IASetIndexBuffer(gvTriangleslistIndexBuffer, DXGI_FORMAT_R16_UINT, 0);
gvContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
};
gvHeight - _top - I'm using orthographic matrix for projection, so the coordinate center is at left-bottom, that why need to substract for proper Y coordinate.
gvOrthographicProjection = XMMatrixOrthographicOffCenterLH( 0.0f, gvWidth, 0.0f, gvHeight, 0.01f, 100.0f );
Do you have any idea what can cause this pointal incompletness of a rectangle in my case or I need to supply more code info (don't really want to link lines of the whole initializations cause they seem very obvious and simple for me, done for /at c++ and directx amateur level :)
Thank you:)