I'm learning DirectX 12. I would like to draw a triangle, but nothing gets drawn. I wonder if there's something I forgot.
This is the Init() part:
void Engine::Init() {
mDevice = new Device();
mCommandQueue = new CommandQueue();
mSwapChain = new SwapChain();
mDescriptorHeap = new DescriptorHeap();
mRootSignature = new RootSignature();
mDevice->Init();
mCommandQueue->Init(mDevice->GetDevice());
mSwapChain->Init(mDevice->GetDevice(), mDevice->GetFactory(), mCommandQueue->GetCommandQueue(), mWndInfo);
mDescriptorHeap->Init(mDevice->GetDevice(), mSwapChain);
mRootSignature->Init(mDevice->GetDevice());
mesh = new Mesh();
shader = new Shader(mRootSignature);
D3D12_INPUT_ELEMENT_DESC desc[] = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0}
};
shader->Init(mDevice->GetDevice(), L"..\\Resources\\Shader\\default.hlsli");
shader->SetPipelineState(mDevice->GetDevice(), desc, _countof(desc));
std::vector<Vertex> vec(3);
vec[0].pos = Vec3(0.0f, 0.5f, 0.5f);
vec[0].color = Vec4(1.0f, 0.0f, 0.0f, 1.0f);
vec[1].pos = Vec3(0.5f, -0.5f, 0.5f);
vec[1].color = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
vec[2].pos = Vec3(-0.5f, -0.5f, 0.5f);
vec[2].color = Vec4(0.0f, 0.0f, 1.0f, 1.0f);
mesh->Init(mDevice->GetDevice(), vec);
mCommandQueue->WaitForPreviousFrame(mSwapChain);
}
void Mesh::Init(ID3D12Device* const device, const std::vector<Vertex>& vec) {
mVertexCount = static_cast<uint32>(vec.size());
const uint32 bufferSize = mVertexCount * sizeof(Vertex);
D3D12_HEAP_PROPERTIES heapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD);
D3D12_RESOURCE_DESC resourceDesc = CD3DX12_RESOURCE_DESC::Buffer(bufferSize);
ThrowIfFailed(device->CreateCommittedResource(
&heapProperties,
D3D12_HEAP_FLAG_NONE,
&resourceDesc,
D3D12_RESOURCE_STATE_GENERIC_READ,
nullptr,
IID_PPV_ARGS(&mVertexBuffer)));
// Copy the triangle data to the vertex buffer.
void* pVertexDataBuffer = nullptr;
CD3DX12_RANGE readRange(0, 0); // We do not intend to read from this resource on the CPU.
ThrowIfFailed(mVertexBuffer->Map(0, &readRange, &pVertexDataBuffer));
std::memcpy(pVertexDataBuffer, &vec[0], bufferSize);
mVertexBuffer->Unmap(0, nullptr);
// Initialize the vertex buffer view.
mVertexBufferView.BufferLocation = mVertexBuffer->GetGPUVirtualAddress();
mVertexBufferView.StrideInBytes = sizeof(Vertex);
mVertexBufferView.SizeInBytes = bufferSize;
}
After Init, this code is executed:
void Engine::Render() {
mCommandQueue->RenderBegin(mSwapChain, mDescriptorHeap, shader->GetGraphicsPSO(), &mViewport, &mScissorRect);
mesh->Render(mCommandQueue->GetCommandList());
mCommandQueue->RenderEnd(mSwapChain);
}
void CommandQueue::RenderBegin(SwapChain* swapChain, DescriptorHeap* descHeap, GraphicsPSO* graphicsPSO, const D3D12_VIEWPORT* viewPort, const D3D12_RECT* scissorRect) {
ThrowIfFailed(mCommandAllocator->Reset());
ThrowIfFailed(mCommandList->Reset(mCommandAllocator, graphicsPSO->GetPipelineState()));
mCommandList->SetGraphicsRootSignature(graphicsPSO->GetRootSignatrue()->GetRootSignature());
mCommandList->RSSetViewports(1, viewPort);
mCommandList->RSSetScissorRects(1, scissorRect);
// Indicate that the back buffer will be used as a render target.
D3D12_RESOURCE_BARRIER resourceBarrierDesc = CD3DX12_RESOURCE_BARRIER::Transition(swapChain->GetBackRtvBuffer(),
D3D12_RESOURCE_STATE_PRESENT,
D3D12_RESOURCE_STATE_RENDER_TARGET);
mCommandList->ResourceBarrier(1, &resourceBarrierDesc);
D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = descHeap->GetBackRtvHandle();
const float clearColor[] = { 0.0f, 0.2f, 0.4f, 1.0f };
mCommandList->OMSetRenderTargets(1, &rtvHandle, FALSE, nullptr);
mCommandList->ClearRenderTargetView(rtvHandle, clearColor, 0, nullptr);
}
void Mesh::Render(ID3D12GraphicsCommandList* cmdList) {
cmdList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
cmdList->IASetVertexBuffers(0, 1, &mVertexBufferView);
cmdList->DrawInstanced(mVertexCount, 1, 0, 0);
}
void CommandQueue::RenderEnd(SwapChain* swapChain) {
// Indicate that the back buffer will now be used to present.
D3D12_RESOURCE_BARRIER resourceBarrierDesc = CD3DX12_RESOURCE_BARRIER::Transition(swapChain->GetBackRtvBuffer(),
D3D12_RESOURCE_STATE_RENDER_TARGET,
D3D12_RESOURCE_STATE_PRESENT);
mCommandList->ResourceBarrier(1, &resourceBarrierDesc);
ThrowIfFailed(mCommandList->Close());
// Execute the command list.
ID3D12CommandList* ppCommandLists[] = { mCommandList };
mCommandQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists);
// Back buffer <-> Front buffer
swapChain->Present();
WaitForPreviousFrame(swapChain);
swapChain->SwapIndex();
}
The result looks like this:
[screenshot: only the clear color is shown, no triangle]
There's no error. I compared my code with the DirectX "Hello Triangle" sample, and as far as I can tell I wrote all of the DirectX calls needed to render, such as OMSetRenderTargets, ResourceBarrier, etc.
Windows 10, GTX 750 Ti, which supports DirectX 12.
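(Side note: one way to surface problems like this when "there's no error" is to enable the D3D12 debug layer before the device is created, e.g. at the start of Device::Init(); it prints validation messages to the debugger output. A minimal sketch, assuming Microsoft::WRL::ComPtr is available:)
// Sketch: enable the debug layer so the runtime reports validation warnings/errors.
Microsoft::WRL::ComPtr<ID3D12Debug> debugController;
if (SUCCEEDED(D3D12GetDebugInterface(IID_PPV_ARGS(&debugController))))
{
    debugController->EnableDebugLayer();
}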
I found the reason myself. It's because I got the initialization of the pipeline state wrong.
ID3D12PipelineState* pipelineState = mGraphicsPSO->GetPipelineState();
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&pipelineState)));
This was my code that creates the graphics pipeline state. When I wrote the following code instead, I got an error:
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mGraphicsPSO->GetPipelineState())));
I wanted to write it like that, but it produces a compile error, "C++ expression must be an lvalue or a function designator", because the value returned by mGraphicsPSO->GetPipelineState() is an rvalue.
So I thought it might work if I wrote it like the original code (the first snippet). Unfortunately, that version doesn't produce a compile error. It "works".
The problem with the first snippet is that the pipeline state pointer inside the GraphicsPSO class is still nullptr, because ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&pipelineState))); writes the created pipeline state into the local variable pipelineState, which is not a member of the GraphicsPSO class.
That is when I posted this question to Stack Overflow.
Anyway, I suspected the first snippet and wrote some test code:
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mGraphicsPSO->mPipelineState)));
I moved mPipelineState from protected to public and wrote the code above. It works well.
[screenshot: the triangle now renders]
This is the result.
I found the fundamental cause of the error, but that doesn't mean I'm finished. I still need to think about how to fix my original code.
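One way to fix the original code without making the member public (a sketch only; GetPipelineStateAddress is a name made up for illustration, not something in the existing class) is to expose the address of the member, or to move the CreateGraphicsPipelineState call into a GraphicsPSO member function so it can write to mPipelineState directly:
// Sketch: let IID_PPV_ARGS write straight into the member instead of a local copy.
class GraphicsPSO
{
public:
    ID3D12PipelineState*  GetPipelineState() const { return mPipelineState; }
    ID3D12PipelineState** GetPipelineStateAddress() { return &mPipelineState; } // hypothetical accessor
    // ...
protected:
    ID3D12PipelineState* mPipelineState = nullptr;
};
// Creation then fills the member itself:
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc,
    IID_PPV_ARGS(mGraphicsPSO->GetPipelineStateAddress())));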
Related
I want to render a triangle with D3D12, but it doesn't work. I followed the Direct3D Programming Guide by Microsoft.
I can clear the render target view, and in the debug output I get a COMMAND_LIST_DRAW_VERTEX_BUFFER_TOO_SMALL warning (DrawInstanced: Vertex Buffer at the input vertex slot 0 is not big enough for what the Draw*() call expects to traverse...) every frame. So my thought is that the problem is with the vertices, or with copying the vertices.
Here is my code:
struct Vertex {
float x;
float y;
};
const Vertex vertices[] = {
{ 0.0f, 0.5f },
{ 0.5f, -0.5f },
{ -0.5f, -0.5f }
};
const UINT vertexBufferSize = sizeof(vertices);
// Create and load the vertex buffers
// I'm using an upload heap, because I couldn't find a working solution with a default heap (see the sketch after this snippet).
CD3DX12_HEAP_PROPERTIES vertexBufferHeapProperties(D3D12_HEAP_TYPE_UPLOAD/*D3D12_HEAP_TYPE_DEFAULT*/);
auto vertexBufferResourceDesc = CD3DX12_RESOURCE_DESC::Buffer(vertexBufferSize);
THROW_IF_FAILED(m_pDevice->CreateCommittedResource(
&vertexBufferHeapProperties,
D3D12_HEAP_FLAG_NONE,
&vertexBufferResourceDesc,
D3D12_RESOURCE_STATE_GENERIC_READ/*D3D12_RESOURCE_STATE_COPY_SOURCE*/,
nullptr,
IID_PPV_ARGS(&m_pVertexBuffer)
));
// Copy the vertex data to the vertex buffer
UINT8* pVertexDataBegin;
CD3DX12_RANGE readRange(0, 0);
THROW_IF_FAILED(m_pVertexBuffer->Map(0, &readRange, reinterpret_cast<void**>(&pVertexDataBegin)));
memcpy(pVertexDataBegin, vertices, sizeof(vertices));
m_pVertexBuffer->Unmap(0, nullptr);
// Create the vertex buffer views
m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
m_vertexBufferView.SizeInBytes = sizeof(Vertex);
m_vertexBufferView.StrideInBytes = vertexBufferSize;
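As an aside to the comment about the default heap above: the usual pattern is to stage the vertex data through an upload-heap buffer and copy it into a default-heap buffer on a command list. A minimal sketch, reusing m_pDevice, m_pCommandList and THROW_IF_FAILED from the question, and assuming the command list is executed and fenced before the first draw:
// Sketch only: default-heap vertex buffer filled through an upload (staging) buffer.
CD3DX12_HEAP_PROPERTIES defaultHeapProps(D3D12_HEAP_TYPE_DEFAULT);
CD3DX12_HEAP_PROPERTIES uploadHeapProps(D3D12_HEAP_TYPE_UPLOAD);
auto bufferDesc = CD3DX12_RESOURCE_DESC::Buffer(vertexBufferSize);
Microsoft::WRL::ComPtr<ID3D12Resource> defaultBuffer;
Microsoft::WRL::ComPtr<ID3D12Resource> uploadBuffer;
THROW_IF_FAILED(m_pDevice->CreateCommittedResource(&defaultHeapProps, D3D12_HEAP_FLAG_NONE,
    &bufferDesc, D3D12_RESOURCE_STATE_COPY_DEST, nullptr, IID_PPV_ARGS(&defaultBuffer)));
THROW_IF_FAILED(m_pDevice->CreateCommittedResource(&uploadHeapProps, D3D12_HEAP_FLAG_NONE,
    &bufferDesc, D3D12_RESOURCE_STATE_GENERIC_READ, nullptr, IID_PPV_ARGS(&uploadBuffer)));
// CPU -> upload buffer
UINT8* pData = nullptr;
CD3DX12_RANGE noRead(0, 0);
THROW_IF_FAILED(uploadBuffer->Map(0, &noRead, reinterpret_cast<void**>(&pData)));
memcpy(pData, vertices, vertexBufferSize);
uploadBuffer->Unmap(0, nullptr);
// upload buffer -> default buffer, then transition for the input assembler
m_pCommandList->CopyBufferRegion(defaultBuffer.Get(), 0, uploadBuffer.Get(), 0, vertexBufferSize);
auto barrier = CD3DX12_RESOURCE_BARRIER::Transition(defaultBuffer.Get(),
    D3D12_RESOURCE_STATE_COPY_DEST, D3D12_RESOURCE_STATE_VERTEX_AND_CONSTANT_BUFFER);
m_pCommandList->ResourceBarrier(1, &barrier);
// The upload buffer must stay alive until the GPU has finished the copy.
For just getting a triangle on screen, though, the upload heap the question already uses is perfectly fine.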
In the input layout I'm using a DXGI_FORMAT_R32G32_FLOAT and D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA for position.
And in the PopulateCommandList function:
const float clearColor[] = { 0.6f, 0.7f, 0.9f, 1.0f };
m_pCommandList->ClearRenderTargetView(renderTargetViewHandle, clearColor, 0, 0);
m_pCommandList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
m_pCommandList->IASetVertexBuffers(0, 1, &m_vertexBufferView);
m_pCommandList->DrawInstanced(3, 1, 0, 0);
The SizeInBytes and StrideInBytes fields are swapped: SizeInBytes must be the total size of the buffer and StrideInBytes the size of a single vertex, which is exactly why the debug layer reports the vertex buffer as too small. It should be
m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
m_vertexBufferView.SizeInBytes = vertexBufferSize;
m_vertexBufferView.StrideInBytes = sizeof(Vertex);
instead of
m_vertexBufferView.BufferLocation = m_pVertexBuffer->GetGPUVirtualAddress();
m_vertexBufferView.SizeInBytes = sizeof(Vertex);
m_vertexBufferView.StrideInBytes = vertexBufferSize;
So, I'm trying to get the constant buffer in my shader to hold an orthographic projection matrix...
Can anyone tell me why I'm rendering nothing now that I've tried to do this?
I assumed I needed two mapped subresources and two buffers, one for the vertices and the other for the constants; maybe this is wrong?
Here is my code:
Vertex OurVertices[] =
{
{ D3DXVECTOR2(-0.5f,-0.5f), D3DXCOLOR(0.0f, 0.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(-0.5f,0.5f), D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(0.5f,0.5f), D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f) },
{ D3DXVECTOR2(-0.5f,-0.5f), D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(0.5f,0.5f), D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f) },
{ D3DXVECTOR2(0.5f,-0.5f), D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f) }
};
// create the vertex buffer
D3D11_BUFFER_DESC vertexBufferDesc;
ZeroMemory(&vertexBufferDesc, sizeof(vertexBufferDesc));
vertexBufferDesc.Usage = D3D11_USAGE_DYNAMIC; // write access by the CPU, read access by the GPU
vertexBufferDesc.ByteWidth = sizeof(Vertex) * 6; // size is the Vertex struct * 6
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER; // use as a vertex buffer
vertexBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; // allow CPU to write in buffer
dev->CreateBuffer(&vertexBufferDesc, NULL, &pVBuffer); // create the buffer
D3D11_MAPPED_SUBRESOURCE ms_Vertex;
devcon->Map(pVBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms_Vertex);
memcpy(ms_Vertex.pData, OurVertices, sizeof(OurVertices)); // copy the data
devcon->Unmap(pVBuffer, NULL); // unmap the buffer
devcon->VSSetConstantBuffers(NULL, 1, &pVBuffer); // Finally set the constant buffer in the vertex shader with the updated values.
MatrixBufferType* dataPtr;
D3D11_BUFFER_DESC constantBufferDesc; // create the constant buffer
ZeroMemory(&constantBufferDesc, sizeof(constantBufferDesc));
constantBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constantBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
constantBufferDesc.ByteWidth = sizeof(MatrixBufferType);
constantBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
constantBufferDesc.MiscFlags = 0;
constantBufferDesc.StructureByteStride = 0;
dev->CreateBuffer(&constantBufferDesc, NULL, &pCBuffer); // create the buffer
D3D11_MAPPED_SUBRESOURCE ms_CBuffer;
devcon->Map(pCBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms_CBuffer);
D3DXMatrixOrthoLH(&m_orthoMatrix, SCREEN_WIDTH, SCREEN_HEIGHT, 0, 1);
D3DXMatrixTranspose(&m_orthoMatrix, &m_orthoMatrix);
dataPtr = (MatrixBufferType*)ms_CBuffer.pData;
dataPtr->projection = m_orthoMatrix;
memcpy(ms_CBuffer.pData, &dataPtr, sizeof(MatrixBufferType));
devcon->Unmap(pCBuffer, NULL);
devcon->VSSetConstantBuffers(NULL, 1, &pCBuffer); // Finally set the constant buffer in the vertex shader with the updated values.
So, attempting to put my projection matrix to use in my shader code:
cbuffer ConstantBuffer
{
matrix world;
matrix view;
matrix projection;
};
VOut VShader(float4 position : POSITION, float4 color : COLOR)
{
VOut output;
output.position = position;
output.position = mul(output.position, projection);
output.color = color;
return output;
}
This causes my quad, which was originally rendering, to disappear.
That leads me to believe I'm doing something wrong; I just don't know what yet.
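For what it's worth, two lines in the code above look suspicious: memcpy(ms_CBuffer.pData, &dataPtr, sizeof(MatrixBufferType)) copies the address of the dataPtr pointer rather than the matrix data (the write through dataPtr->projection just before it was already enough), and the first VSSetConstantBuffers call binds the vertex buffer (pVBuffer) to a constant-buffer slot. A minimal sketch of the usual dynamic constant-buffer update, reusing names from the question:
// Sketch: fill the constant buffer with the matrix data itself and bind it
// to slot 0 (which corresponds to register(b0) in the shader).
D3D11_MAPPED_SUBRESOURCE ms_CBuffer;
if (SUCCEEDED(devcon->Map(pCBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms_CBuffer)))
{
    MatrixBufferType data = {};
    D3DXMatrixOrthoLH(&m_orthoMatrix, SCREEN_WIDTH, SCREEN_HEIGHT, 0.0f, 1.0f);
    D3DXMatrixTranspose(&m_orthoMatrix, &m_orthoMatrix);
    data.projection = m_orthoMatrix;
    memcpy(ms_CBuffer.pData, &data, sizeof(MatrixBufferType)); // copy the struct, not a pointer to it
    devcon->Unmap(pCBuffer, 0);
}
devcon->VSSetConstantBuffers(0, 1, &pCBuffer); // the vertex buffer goes through IASetVertexBuffers instead
Note also that the shader's cbuffer declares world, view and projection, so whatever MatrixBufferType looks like, it has to match that layout, otherwise the projection matrix is read from the wrong offset.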
I have to crop a video, so I modify the texture coordinates.
The code is below:
const GLfloat squareTextureCoordinates[] = {
0.22f , 1.0f,
0.22f, 0.0f,
0.78f, 1.0f,
0.78f, 0.0f,
};
glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, 0, 0, squareVertices);
glVertexAttribPointer(inputTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, squareTextureCoordinates);
This code works.
However, I need to support several iPhone screen sizes (for example, the iPhone 6 has a different size than the iPhone 5).
So I compute the texture coordinates from variables instead of hard-coded constants.
The new code is below:
const float cropX=(float)(SCREEN_HEIGHT-SCREEN_WIDTH)/(float)(SCREEN_HEIGHT*2);
const float cropXR=1- (float)(SCREEN_HEIGHT-SCREEN_WIDTH)/(float)(SCREEN_HEIGHT*2);
const GLfloat squareTextureCoordinates[] = {
cropX , 1.0f,
cropX, 0.0f,
cropXR, 1.0f,
cropXR, 0.0f,
};
glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, 0, 0, squareVertices);
glVertexAttribPointer(inputTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, squareTextureCoordinates);
However, it doesn't work.
The screen displays nothing.
I can't figure out why I can't use these const variables as texture coordinates.
I think you could test the second part of your code by changing the const values to the constants you used in the first part:
const float cropX = 0.22f;
const float cropXR = 0.78f;
const GLfloat squareTextureCoordinates[] = {
cropX , 1.0f,
cropX, 0.0f,
cropXR, 1.0f,
cropXR, 0.0f,
};
glVertexAttribPointer(positionAttribute, 2, GL_FLOAT, 0, 0, squareVertices);
glVertexAttribPointer(inputTextureCoordinateAttribute, 2, GL_FLOAT, 0, 0, squareTextureCoordinates);
If this works (and it should), you will see that you can use const variables for texture coordinates. However, I reckon the problematic part of the code is this:
const float cropX=(float)(SCREEN_HEIGHT-SCREEN_WIDTH)/(float)(SCREEN_HEIGHT*2);
const float cropXR=1- (float)(SCREEN_HEIGHT-SCREEN_WIDTH)/(float)(SCREEN_HEIGHT*2);
Texture coordinates are only valid between 0 and 1. If the width is greater than the height, this code produces values outside that range, so it won't work. In the specific case of the iPhone 5 and iPhone 6, which have the same aspect ratio, the same texture coordinates should work as long as the view that renders the OpenGL content is resized (if you use Auto Layout for that view, that will probably happen automatically).
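To make that concrete, a small sketch (assuming SCREEN_WIDTH and SCREEN_HEIGHT are plain numeric sizes) that clamps the computed coordinates so a wider-than-tall screen cannot push them out of range:
// Sketch: keep the computed crop coordinates inside the valid [0, 1] range.
float crop = (float)(SCREEN_HEIGHT - SCREEN_WIDTH) / (float)(SCREEN_HEIGHT * 2);
if (crop < 0.0f) crop = 0.0f;   // width >= height: nothing to crop horizontally
if (crop > 0.5f) crop = 0.5f;   // never cross the middle of the texture
const float cropX  = crop;
const float cropXR = 1.0f - crop;
const GLfloat squareTextureCoordinates[] = {
    cropX,  1.0f,
    cropX,  0.0f,
    cropXR, 1.0f,
    cropXR, 0.0f,
};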
I'm learning how to use OpenGL ES under cocos2d-x.
Help me understand why the function below crashes with EXC_BAD_ACCESS on noise->visit(); which probably means that my OpenGL state is screwed up by the time the command is executed.
My method:
Sprite* GameLevelLayer::spriteWithColor(Color4F bgColor, float textureWidth, float textureHeight)
{
// 1: Create new RenderTexture
auto rt = RenderTexture::create(textureWidth, textureHeight);
// 2: Call RenderTexture:begin
rt->beginWithClear(bgColor.r, bgColor.g, bgColor.b, bgColor.a);
// 3: Draw into the texture
//// gradient
gradient_command.init(rt->getGlobalZOrder());
gradient_command.func = std::bind(&GameLevelLayer::onDrawGradient, this, textureWidth, textureHeight);
auto renderer = Director::getInstance()->getRenderer();
renderer->addCommand(&gradient_command);
//// noise cloud
BlendFunc blendFunc;
blendFunc.src = GL_DST_COLOR;
blendFunc.dst = GL_ZERO;
auto noise = Sprite::create("Noise.png");
noise->setBlendFunc(blendFunc);
noise->setPosition(Vec2(textureWidth / 2, textureHeight / 2));
// XXX
noise->visit();
// 4: Call RenderTexture:end
rt->end();
// 5: Create a new Sprite from the texture
return Sprite::createWithTexture(rt->getSprite()->getTexture());
}
and the gradient:
void GameLevelLayer::onDrawGradient(float textureWidth, float textureHeight)
{
setGLProgram(ShaderCache::getInstance()->getGLProgram(GLProgram::SHADER_NAME_POSITION_COLOR));
getGLProgram()->use();
getGLProgram()->setUniformsForBuiltins();
float gradientAlpha = 0.8f;
std::vector<Vec2> vertices;
std::vector<Color4F> colors;
vertices.push_back(Vec2(0, 0));
colors.push_back(Color4F(0.0f, 0.0f, 0.0f, 0.0f ));
vertices.push_back(Vec2(textureWidth, 0));
colors.push_back(Color4F(0.0f, 0.0f, 0.0f, 0.0f ));
vertices.push_back(Vec2(0, textureHeight));
colors.push_back(Color4F(0.0f, 0.0f, 0.0f, gradientAlpha ));
vertices.push_back(Vec2(textureWidth, textureHeight));
colors.push_back(Color4F(0.0f, 0.0f, 0.0f, gradientAlpha ));
GL::enableVertexAttribs(GL::VERTEX_ATTRIB_FLAG_POSITION | GL::VERTEX_ATTRIB_FLAG_COLOR);
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_POSITION, 2, GL_FLOAT, GL_FALSE, 0, vertices.data());
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_COLOR, 4, GL_FLOAT, GL_FALSE, 0, colors.data());
glBlendFunc(CC_BLEND_SRC, CC_BLEND_DST);
glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei)vertices.size());
}
Is this because I am using parameters of the wrong size somewhere? How does one go about debugging something like this?
I'm doing something with DirectX 11 and got to drawing a rectangle outline (empty, not filled). It seemed simple to me at first (LINELIST topology, 8 indices), but when I get it on screen I see that my rectangle is somewhat incomplete at the top-left coordinate: there is a pixel of the background color there. The code is not complicated at all; the vertices are in 2D space:
SIMPLEVERTEX gvFrameVertices[4]=
{XMFLOAT3(0.0f,0.0f,1.0f),XMFLOAT2(0.0f, 0.0f),
XMFLOAT3(1.0f, 0.0f, 1.0f), XMFLOAT2(1.0f, 0.0f),
XMFLOAT3(1.0f, -1.0f, 1.0f), XMFLOAT2(1.0f, 1.0f),
XMFLOAT3(0.0f, -1.0f, 1.0f), XMFLOAT2(0.0f, 1.0f)};
indices:
WORD gvRectangularIndices[8] = { 0, 1, 1, 2, 2, 3, 3, 0 };
The pixel shader just returns the color given in the constant buffer:
float4 PS_PANEL(PS_INPUT input) : SV_Target
{
return fontcolor;
}
Function code itself:
VOID rectangle(INT _left, INT _top, INT _width, INT _height, XMFLOAT4 _color)
{
XMMATRIX scale;
XMMATRIX translate;
XMMATRIX world;
scale = XMMatrixScaling( _width, _height, 1.0f );
translate = XMMatrixTranslation(_left, gvHeight - _top, 1.0f);
world = scale * translate;
gvConstantBufferData.world = XMMatrixTranspose(world);
gvConstantBufferData.index = 1.0f;
gvConstantBufferData.color = _color;
gvContext->PSSetShader(gvPanelPixelshader, NULL, 0);
gvContext->UpdateSubresource(gvConstantBuffer, 0, NULL, &gvConstantBufferData, 0, 0 );
gvContext->IASetIndexBuffer(gvLinelistIndexBuffer, DXGI_FORMAT_R16_UINT, 0);
gvContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_LINELIST );
gvContext->DrawIndexed(8, 0, 0);
gvContext->IASetIndexBuffer(gvTriangleslistIndexBuffer, DXGI_FORMAT_R16_UINT, 0);
gvContext->IASetPrimitiveTopology( D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST );
};
gvHeight - _top: I'm using an orthographic matrix for the projection, so the coordinate origin is at the bottom left, which is why I need to subtract to get the proper Y coordinate.
gvOrthographicProjection = XMMatrixOrthographicOffCenterLH( 0.0f, gvWidth, 0.0f, gvHeight, 0.01f, 100.0f );
Do you have any idea what can cause this single missing pixel in the rectangle in my case, or do I need to supply more code? (I don't really want to paste the whole initialization, since it seems very obvious and simple to me, written at a C++ and DirectX amateur level :)
Thank you:)