I'm learning DirectX 11 and I'm trying to draw two quads with a single function, but when I call this function it overwrites the previous quad.
If you want to see the problem, here is a gif:
https://i.gyazo.com/b819ffc64975c1531434047b9b4a92f7.mp4
Here is the code (this is where I call the function to draw the quads):
void Application::ApplicationRun()
{
//wind.SetEventCallBack(std::bind(&Application::OnEvent, Instance, std::placeholders::_1));
Vertex v[] =
{
Vertex(-0.5f, -0.5f, 0.0f, 1.0f), // bottom Left Point - [0]
Vertex(-0.5f, 0.5f, 0.0f, 0.0f), // top Left Point - [1]
Vertex{ 0.0f, -0.5f, 1.0f, 1.0f}, // -[2]
Vertex(0.0f, 0.5f, 1.0f, 0.0f), //Right Point - [3]
};
Vertex vv[] =
{
Vertex(-0.1f, -0.1f, 0.0f, 0.0f), // bottom Left Point - [0]
Vertex(-0.1f, 0.1f, 0.0f, 0.0f), // top Left Point - [1]
Vertex{ 0.05f, -0.1f, 0.0f, 0.0f}, // -[2]
Vertex(0.05f, 0.1f, 0.0f, 0.0f), //Right Point - [3]
};
DWORD indices[] =
{
0,1,2,
1,2,3,
};
while (wind.PorcessMessage())
{
//
if (Armageddon::Application::GetInstance()->GetWindow()->GetNativeKeyBoard().KeyIsPressed(AG_KEY_A))
{
Log::GetLogger()->trace("A ");
wind.GetWindowGraphics()->DrawTriangle(v, ARRAYSIZE(v));
}
if (Armageddon::Application::GetInstance()->GetWindow()->GetNativeKeyBoard().KeyIsPressed(AG_KEY_B))
{
Log::GetLogger()->trace("B");
wind.GetWindowGraphics()->DrawTriangle(vv, ARRAYSIZE(vv));
}
}
}
(Here is the DrawTriangle function:)
void Armageddon::D3D_graphics::DrawTriangle(Vertex v[], int Vertexcount)
{
HRESULT hr = vertexBuffer.Initialize(this->device.Get(),this->device_context.Get() , v, Vertexcount);
if (FAILED(hr))
{
Armageddon::Log::GetLogger()->error("FAILED INITIALIZE VERTEX BUFFER ");
}
DWORD indices[] =
{
0,1,2,
1,2,3,
};
hr = this->indicesBuffer.Init(this->device.Get(), indices, ARRAYSIZE(indices));
hr = DirectX::CreateWICTextureFromFile(this->device.Get(), L"..\\TestApplication\\assets\\Textures\\tex.png",nullptr,textures.GetAddressOf());
if (FAILED(hr))
{
Armageddon::Log::GetLogger()->error("FAILED INITIALIZE WIC TEXTURE ");
}
}
(Here is where I initialize the vertex and index buffers:)
HRESULT Initialize(ID3D11Device* device , ID3D11DeviceContext* device_context, T* data, UINT numElements)
{
this->bufferSize = numElements;
this->stride = sizeof(T);
D3D11_BUFFER_DESC vertex_buffer_desc;
ZeroMemory(&vertex_buffer_desc, sizeof(vertex_buffer_desc));
vertex_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buffer_desc.ByteWidth = sizeof(Vertex) * numElements;
vertex_buffer_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buffer_desc.CPUAccessFlags = 0;
vertex_buffer_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA VertexBufferData;
ZeroMemory(&VertexBufferData, sizeof(VertexBufferData));
VertexBufferData.pSysMem = data;
HRESULT hr = device->CreateBuffer(&vertex_buffer_desc, &VertexBufferData, buffer.GetAddressOf());
UINT offset = 0;
device_context->IASetVertexBuffers(0, 1, buffer.GetAddressOf(), &stride, &offset);
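// Note: Initialize() also binds the freshly created buffer to slot 0 here, and RenderFrame() never rebinds it, so whichever buffer was initialized last is the one that gets drawn.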
return hr;
};
HRESULT Init(ID3D11Device* device, DWORD* data, UINT n_indices)
{
D3D11_SUBRESOURCE_DATA Indice_buffer_data;
this->buffer_size = n_indices;
D3D11_BUFFER_DESC Indice_buffer_desc;
ZeroMemory(&Indice_buffer_desc, sizeof(Indice_buffer_desc));
Indice_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
Indice_buffer_desc.ByteWidth = sizeof(DWORD) * n_indices;
Indice_buffer_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
Indice_buffer_desc.CPUAccessFlags = 0;
Indice_buffer_desc.MiscFlags = 0;
ZeroMemory(&Indice_buffer_data, sizeof(Indice_buffer_data));
Indice_buffer_data.pSysMem = data;
HRESULT hr = device->CreateBuffer(&Indice_buffer_desc, &Indice_buffer_data, buffer.GetAddressOf());
return hr;
};
And here is where I draw the quads:
void Armageddon::D3D_graphics::RenderFrame()
{
float color[] = { 0.1f,0.1f,0.1f,1.0f };
ImGui_ImplDX11_NewFrame();
ImGui_ImplWin32_NewFrame();
ImGui::NewFrame();
if (show_demo_window)
ImGui::ShowDemoWindow(&show_demo_window);
// 2. Show a simple window that we create ourselves. We use a Begin/End pair to created a named window.
{
static float f = 0.0f;
static int counter = 0;
ImGui::Begin("Hello, world!"); // Create a window called "Hello, world!" and append into it.
ImGui::Text("This is some useful text."); // Display some text (you can use a format strings too)
ImGui::Checkbox("Demo Window", &show_demo_window); // Edit bools storing our window open/close state
ImGui::Checkbox("Another Window", &show_another_window);
ImGui::SliderFloat("float", &f, 0.0f, 1.0f); // Edit 1 float using a slider from 0.0f to 1.0f
ImGui::ColorEdit3("clear color", (float*)&clear_color); // Edit 3 floats representing a color
if (ImGui::Button("Button")) // Buttons return true when clicked (most widgets return true when edited/activated)
counter++;
ImGui::SameLine();
ImGui::Text("counter = %d", counter);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGui::End();
}
// 3. Show another simple window.
if (show_another_window)
{
ImGui::Begin("Another Window", &show_another_window); // Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked)
ImGui::Text("Hello from another window!");
if (ImGui::Button("Close Me"))
show_another_window = false;
ImGui::End();
}
ImGui::Render();
this->device_context->OMSetRenderTargets(1, target_view.GetAddressOf(), this->depthStencilView.Get());
this->device_context->ClearRenderTargetView(this->target_view.Get(), color);
this->device_context->ClearDepthStencilView(this->depthStencilView.Get(), D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
// This is where all the rendering happens, after the targets have been cleared
this->device_context->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY::D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
f += 0.1;
ConstantBuffer.data.mat = DirectX::XMMatrixRotationRollPitchYaw(0.0f,0.0f,f);
ConstantBuffer.data.mat = DirectX::XMMatrixTranspose(ConstantBuffer.data.mat);
ConstantBuffer.data.Yoffset = f;
ConstantBuffer.data.Xoffset = 0;
// Armageddon::Log::GetLogger()->trace(ConstantBuffer.data.Yoffset);
if (!ConstantBuffer.ApplyChanges())
{
Armageddon::Log::GetLogger()->error("ERROR WHEN APPLYING CHANGES");
}
this->device_context->VSSetConstantBuffers(0, 1, ConstantBuffer.GetAdressOf());
/***********SHADER*******************************/
this->device_context->VSSetShader(vertexShader.GetShader(), NULL, 0);
this->device_context->PSSetShader(pixelShader.GetShader(), NULL, 0);
this->device_context->IASetInputLayout(this->vertexShader.GetInputLayout());
/***********Texture Sampler*******************************/
this->device_context->PSSetSamplers(0, 1, this->ColorSampler.GetAddressOf());
/***********DEPTH BUFFER*******************************/
this->device_context->OMSetDepthStencilState(this->depthStencilState.Get(), 0);
/***********RASTERIZER STATE*******************************/
this->device_context->RSSetState(this->rasterizerState.Get());
/***********UPDATE THE CONSTANT BUFFERS*******************************/
UINT stride = sizeof(Vertex);
UINT offset = 0;
this->device_context->IASetIndexBuffer(indicesBuffer.Get(), DXGI_FORMAT_R32_UINT, 0);
//this->device_context->IASetVertexBuffers(0, 1, vertexBuffer.GetAddressOf(), &stride, &offset);
this->device_context->PSSetShaderResources(0, 1, this->textures.GetAddressOf());
this->device_context->DrawIndexed(indicesBuffer.GetSize(), 0,0);
ImGui_ImplDX11_RenderDrawData(ImGui::GetDrawData());
this->swapchain->Present(1,0);
}
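Since DrawTriangle re-initializes the single vertexBuffer member and RenderFrame issues only one DrawIndexed on whatever is bound in slot 0, the second quad's data necessarily replaces the first. A minimal sketch of one possible arrangement (the quadA / quadB names are illustrative, not from the project): initialize each quad into its own buffer wrapper once, then bind and draw each one per frame:
// using the stride/offset already declared above; quadA / quadB are hypothetical
// members of the same wrapper type as vertexBuffer, each initialized only once
this->device_context->IASetVertexBuffers(0, 1, quadA.GetAddressOf(), &stride, &offset);
this->device_context->DrawIndexed(indicesBuffer.GetSize(), 0, 0);
this->device_context->IASetVertexBuffers(0, 1, quadB.GetAddressOf(), &stride, &offset);
this->device_context->DrawIndexed(indicesBuffer.GetSize(), 0, 0);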
My DirectX application does not render the texture correctly. Result:
Expected from VS editor:
As you can see, the cat texture is not completely drawn.
I'm using WaveFrontReader to load the .OBJ and .MTL files and WICTextureLoader to load the PNG/JPG.
My HLSL:
cbuffer constants : register(b0)
{
row_major float4x4 transform;
row_major float4x4 projection;
float3 lightvector;
}
struct vs_in
{
float3 position : POS;
float3 normal : NOR;
float2 texcoord : TEX;
float4 color : COL;
};
struct vs_out
{
float4 position : SV_POSITION;
float2 texcoord : TEX;
float4 color : COL;
};
Texture2D mytexture : register(t0);
SamplerState mysampler : register(s0);
vs_out vs_main(vs_in input)
{
float light = clamp(dot(normalize(mul(float4(input.normal, 0.0f), transform).xyz), normalize(-lightvector)), 0.0f, 1.0f) * 0.8f + 0.2f;
vs_out output;
output.position = mul(float4(input.position, 1.0f), mul(transform, projection));
output.texcoord = input.texcoord;
output.color = float4(input.color.rgb * light, input.color.a);
return output;
}
float4 ps_main(vs_out input) : SV_TARGET
{
return mytexture.Sample(mysampler, input.texcoord) * input.color;
}
My preparation:
void Config3DWindow()
{
const wchar_t* tf = L"1.hlsl";
d2d.m_swapChain1->GetBuffer(0, __uuidof(ID3D11Texture2D), reinterpret_cast<void**>(&frameBuffer));
d2d.device->CreateRenderTargetView(frameBuffer, nullptr, &frameBufferView);
frameBuffer->GetDesc(&depthBufferDesc); // based on the framebuffer's properties
depthBufferDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthBufferDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
CComPtr<ID3DBlob> vsBlob;
D3DCompileFromFile(tf, nullptr, nullptr, "vs_main", "vs_5_0", 0, 0, &vsBlob, nullptr);
d2d.device->CreateVertexShader(vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), nullptr, &vertexShader);
D3D11_INPUT_ELEMENT_DESC inputElementDesc[] =
{
{ "POS", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEX", 0, DXGI_FORMAT_R32G32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COL", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
d2d.device->CreateInputLayout(inputElementDesc, ARRAYSIZE(inputElementDesc), vsBlob->GetBufferPointer(), vsBlob->GetBufferSize(), &inputLayout);
///////////////////////////////////////////////////////////////////////////////////////////////
CComPtr<ID3DBlob> psBlob;
D3DCompileFromFile(tf, nullptr, nullptr, "ps_main", "ps_5_0", 0, 0, &psBlob, nullptr);
d2d.device->CreatePixelShader(psBlob->GetBufferPointer(), psBlob->GetBufferSize(), nullptr, &pixelShader);
D3D11_BUFFER_DESC constantBufferDesc = {};
constantBufferDesc.ByteWidth = sizeof(Constants) + 0xf & 0xfffffff0;
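// (operator precedence: + binds tighter than &, so this rounds the struct size up to the next multiple of 16 bytes, as constant buffers require)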
constantBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
constantBufferDesc.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
constantBufferDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
d2d.device->CreateBuffer(&constantBufferDesc, nullptr, &constantBuffer);
}
Loading the obj:
WaveFrontReader<UINT> wfr;
wfr.Load(L"12221_Cat_v1_l3.oobj");
wfr.LoadMTL(L"12221_Cat_v1_l3.mtl");
obj.CreateDirect3D2(wfr);
CreateDirect3D2() function:
std::vector<float> Vertices;
// float VertexDataX[] = // float3 position, float3 normal, float2 texcoord, float4 color
auto numV = wf.vertices.size();
Vertices.resize(numV * 12);
for (size_t i = 0; i < numV; i++)
{
auto& v = wf.vertices[i];
float* i2 = Vertices.data() + (i * 12);
// position
i2[0] = v.position.x;
i2[1] = v.position.y;
i2[2] = v.position.z;
// normal
i2[3] = v.normal.x;
i2[4] = v.normal.y;
i2[5] = v.normal.z;
// tx
i2[6] = v.textureCoordinate.x;
i2[7] = v.textureCoordinate.y;
// Colors
i2[8] = 1.0f;
i2[9] = 1.0f;
i2[10] = 1.0f;
i2[11] = 1.0f;
}
D3D11_BUFFER_DESC vertexBufferDesc = {};
vertexBufferDesc.ByteWidth = Vertices.size() * sizeof(float);
vertexBufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
vertexBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
D3D11_SUBRESOURCE_DATA vertexData = { Vertices.data() }; // in data.h
vertexBuffer = 0;
d2d.device->CreateBuffer(&vertexBufferDesc, &vertexData, &vertexBuffer);
// Indices
std::vector<UINT>& Indices = wf.indices;
D3D11_BUFFER_DESC indexBufferDesc = {};
IndicesSize = Indices.size() * sizeof(UINT);
indexBufferDesc.ByteWidth = IndicesSize;
indexBufferDesc.Usage = D3D11_USAGE_IMMUTABLE;
indexBufferDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
D3D11_SUBRESOURCE_DATA indexData = { Indices.data() }; // in data.h
indexBuffer = 0;
d2d.device->CreateBuffer(&indexBufferDesc, &indexData, &indexBuffer);
for (auto& ma : wf.materials)
{
CComPtr<ID3D11Resource> tex;
CComPtr<ID3D11ShaderResourceView> texv;
CreateWICTextureFromFile(d2d.device, d2d.context, ma.strTexture, &tex, &texv,0);
if (tex && texv)
{
OBJFT ot;
ot.texture = tex;
ot.textureView = texv;
textures.push_back(ot);
}
tex = 0;
texv = 0;
}
The drawing function:
void Present(OBJF& o, int Count, _3DP& _3, D2D1_COLOR_F bcol)
{
float w = static_cast<float>(depthBufferDesc.Width); // width
float h = static_cast<float>(depthBufferDesc.Height); // height
float n = 1000.0f; // near
float f = 1000000.0f; // far
matrix rotateX = { 1, 0, 0, 0, 0, static_cast<float>(cos(_3.rotation[0])), -static_cast<float>(sin(_3.rotation[0])), 0, 0, static_cast<float>(sin(_3.rotation[0])), static_cast<float>(cos(_3.rotation[0])), 0, 0, 0, 0, 1 };
matrix rotateY = { static_cast<float>(cos(_3.rotation[1])), 0, static_cast<float>(sin(_3.rotation[1])), 0, 0, 1, 0, 0, -static_cast<float>(sin(_3.rotation[1])), 0, static_cast<float>(cos(_3.rotation[1])), 0, 0, 0, 0, 1 };
matrix rotateZ = { static_cast<float>(cos(_3.rotation[2])), -static_cast<float>(sin(_3.rotation[2])), 0, 0, static_cast<float>(sin(_3.rotation[2])), static_cast<float>(cos(_3.rotation[2])), 0, 0, 0, 0, 1, 0, 0, 0, 0, 1 };
matrix scale = { _3.scale[0], 0, 0, 0, 0, _3.scale[1], 0, 0, 0, 0, _3.scale[2], 0, 0, 0, 0, 1 };
matrix translate = { 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, _3.translation[0], _3.translation[1], _3.translation[2], 1 };
///////////////////////////////////////////////////////////////////////////////////////////
D3D11_MAPPED_SUBRESOURCE mappedSubresource = {};
d2d.context->Map(constantBuffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedSubresource);
Constants* constants = reinterpret_cast<Constants*>(mappedSubresource.pData);
constants->Transform = rotateX * rotateY * rotateZ * scale * translate;
constants->Projection = { 2 * n / w, 0, 0, 0, 0, 2 * n / h, 0, 0, 0, 0, f / (f - n), 1, 0, 0, n * f / (n - f), 0 };
constants->LightVector = { 1.0f, 1.0f, 1.0f };
d2d.context->Unmap(constantBuffer, 0);
///////////////////////////////////////////////////////////////////////////////////////////
FLOAT backgroundColor[4] = { 0.00f, 0.00f, 0.00f, 1.0f };
if (bcol.a > 0)
{
backgroundColor[0] = bcol.r;
backgroundColor[1] = bcol.g;
backgroundColor[2] = bcol.b;
backgroundColor[3] = bcol.a;
}
UINT stride = 12 * 4; // vertex size (12 floats: float3 position, float3 normal, float2 texcoord, float4 color)
UINT offset = 0;
D3D11_VIEWPORT viewport = { 0.0f, 0.0f, w, h, 0.0f, 1.0f };
///////////////////////////////////////////////////////////////////////////////////////////
auto deviceContext = d2d.context;
deviceContext->ClearRenderTargetView(frameBufferView, backgroundColor);
deviceContext->ClearDepthStencilView(depthBufferView, D3D11_CLEAR_DEPTH, 1.0f, 0);
deviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
deviceContext->IASetInputLayout(inputLayout);
deviceContext->IASetVertexBuffers(0, 1, &o.vertexBuffer.p, &stride, &offset);
deviceContext->IASetIndexBuffer(o.indexBuffer, DXGI_FORMAT_R32_UINT, 0);
deviceContext->VSSetShader(vertexShader, nullptr, 0);
deviceContext->VSSetConstantBuffers(0, 1, &constantBuffer.p);
deviceContext->RSSetViewports(1, &viewport);
deviceContext->PSSetShader(pixelShader, nullptr, 0);
std::vector<ID3D11ShaderResourceView*> rsx;
for (auto& t : o.textures)
rsx.push_back(t.textureView);
ID3D11ShaderResourceView** rr = rsx.data();
deviceContext->PSSetShaderResources(0, rsx.size(), rr);
deviceContext->PSSetSamplers(0, 1, &samplerState.p);
deviceContext->OMSetRenderTargets(1, &frameBufferView.p, depthBufferView);
deviceContext->OMSetDepthStencilState(depthStencilState, 0);
///////////////////////////////////////////////////////////////////////////////////////////
DXGI_RGBA ra = { 1,1,1,1 };
deviceContext->DrawIndexed(o.IndicesSize, 0, 0);
d2d.m_swapChain1->Present(1, 0);
}
Entire project here: https://drive.google.com/open?id=1BbW3DUd20bAwei4KjnkUPwgm5Ia1aRxl
This is what I got after I was able to reproduce the OP's issue on my side:
My only change was that I excluded lighting in the shader code:
vs_out vs_main(vs_in input)
{
float light = 1.0f;
//float light = clamp(dot(normalize(mul(float4(input.normal, 0.0f), transform).xyz), normalize(-lightvector)), 0.0f, 1.0f) * 0.8f + 0.2f;
vs_out output;
output.position = mul(float4(input.position, 1.0f), mul(transform, projection));
output.texcoord = input.texcoord;
output.color = float4(input.color.rgb * light, input.color.a);
return output;
}
Then I became aware of the cat's eye on the cat's tail.
That reminded me that a lot of image formats store the image top to bottom.
OpenGL textures (and probably Direct3D textures as well) usually have their origin in the lower left corner. Hence, it is not unusual for texture images to be mirrored vertically (during or after loading the image from file, and before sending it to the GPU).
To prove my suspicion, I mirrored the image manually (in GIMP) and then (without re-compiling) got this:
It looks like my suspicion was right.
Something is wrong with the image, or with the texture loading in the OP's loader.
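If the loader itself is left untouched, a common workaround is to flip the V coordinate while building the vertex data. A sketch against the interleaving loop from the question (only the texcoord lines change):
// tx - flip V so the top-down image matches the sampler's origin
i2[6] = v.textureCoordinate.x;
i2[7] = 1.0f - v.textureCoordinate.y;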
I'm trying to implement instanced mesh rendering in Vulkan.
My problem is that Vulkan uses only the first vertex from the bound vertex buffer and duplicates that value for all indices.
RenderDoc output (showing the duplicated vertex input):
These should be the correct values {{<inPosition>},{<inColor>},{<inTexCoord>}}:
const vkf::Vertex vertices[] = {
{ { -0.2f, -0.5f, 0.0f },{ 1.0f, 0.0f, 0.0f },{ 1.0f, 0.0f } },
{ { 0.5f, -0.5f, 0.0f },{ 0.0f, 1.0f, 0.0f },{ 0.0f, 0.0f } },
{ { 0.5f, 0.5f, 0.0f },{ 0.0f, 0.0f, 1.0f },{ 0.0f, 1.0f } },
{ { -0.5f, 0.5f, 0.0f },{ 1.0f, 1.0f, 1.0f },{ 1.0f, 1.0f } }
};
I've checked the VertexBuffer multiple times and it contains the correct values.
Here is the snippet from my command buffer creation:
vkCmdBindPipeline(m_commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines.agents);
VkBuffer vertexBuffers[] = { models.agent.verticesBuffer };
VkBuffer instanceBuffers[] = { m_instanceBuffer.buffer };
VkDeviceSize offsets[] = { 0 };
vkCmdBindVertexBuffers(m_commandBuffers[i], 0, 1, vertexBuffers, offsets);
vkCmdBindVertexBuffers(m_commandBuffers[i], 1, 1, instanceBuffers, offsets);
vkCmdBindIndexBuffer(m_commandBuffers[i], models.agent.indexBuffer, 0, VK_INDEX_TYPE_UINT16);
vkCmdBindDescriptorSets(m_commandBuffers[i], VK_PIPELINE_BIND_POINT_GRAPHICS, m_pipelineLayouts.pipelineLayoutAgent, 0, 1, &descriptorSets.agent, 0, nullptr);
vkCmdDrawIndexed(m_commandBuffers[i], static_cast<uint32_t> (models.agent.indexCount), 5, 0, 0, 0);
My first assumption was that my binding description was wrong, but I can't see the error:
bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
Why is only the first value from the buffer used?
EDIT:
InstanceData:
struct InstanceData
{
glm::vec3 pos;
glm::vec3 rot;
float scale;
static VkVertexInputBindingDescription getBindingDescription()
{
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = INSTANCING_BIND_ID;
bindingDescription.stride = sizeof(InstanceData);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE;
return bindingDescription;
}
};
The whole VkPipelineVertexInputStateCreateInfo:
std::vector<VkVertexInputBindingDescription> bindingDesciption = {};
std::vector<VkVertexInputAttributeDescription> attributeDescriptions = {};
bindingDesciption = {
models.agent.bindingDescription,
InstanceData::getBindingDescription()
};
attributeDescriptions =
{
vertexInputAttributeDescription(VERTEX_BIND_ID, 0, VK_FORMAT_R32G32B32_SFLOAT, offsetof(vkf::Vertex, pos)),
vertexInputAttributeDescription(VERTEX_BIND_ID, 1, VK_FORMAT_R32G32B32_SFLOAT, offsetof(vkf::Vertex, color)),
vertexInputAttributeDescription(INSTANCING_BIND_ID, 2, VK_FORMAT_R32G32B32_SFLOAT, offsetof(InstanceData, pos)),
vertexInputAttributeDescription(VERTEX_BIND_ID, 3, VK_FORMAT_R32G32_SFLOAT, offsetof(vkf::Vertex, texCoord))
};
VkPipelineVertexInputStateCreateInfo vertexInputInfo = {};
vertexInputInfo.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertexInputInfo.vertexBindingDescriptionCount = static_cast<uint32_t>(bindingDesciption.size());
vertexInputInfo.vertexAttributeDescriptionCount = static_cast<uint32_t>(attributeDescriptions.size());
vertexInputInfo.pVertexBindingDescriptions = bindingDesciption.data();
vertexInputInfo.pVertexAttributeDescriptions = attributeDescriptions.data();
EDIT 2
#define VERTEX_BIND_ID 0
#define INSTANCING_BIND_ID 1
EDIT 3:
I'm using VulkanMemoryAllocator.
Creating the staging buffer:
size_t vertexBufferSize = sizeof(vkf::Vertex) *_countof(vertices);
createStagingBuffer(vertexBufferSize );
VkBuffer createStagingBuffer(VkDeviceSize size)
{
VkBuffer buffer;
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = size;
bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
allocCreateInfo.flags = VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT;
VmaAllocationInfo allocInfo = {};
if (vmaCreateBuffer(m_allocator, &bufferInfo, &allocCreateInfo, &buffer, &m_allocation, &allocInfo) != VK_SUCCESS)
{
throw std::runtime_error("failed to create Buffer!");
}
return buffer;
}
Copy vertices:
memcpy(mappedStaging, vertices, vertexBufferSize);
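Where mappedStaging comes from isn't shown; since the staging allocation requests VMA_ALLOCATION_CREATE_PERSISTENT_MAP_BIT, the pointer is presumably taken from the allocation info. A hedged sketch:
// assumption: with persistent mapping, VMA exposes the host pointer through the
// allocation info, so no explicit vmaMapMemory call is needed
VmaAllocationInfo stagingInfo = {};
vmaGetAllocationInfo(m_allocator, m_allocation, &stagingInfo);
void* mappedStaging = stagingInfo.pMappedData;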
Creating the buffer:
createBuffer(vertexBufferSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, VMA_MEMORY_USAGE_GPU_ONLY);
VkBuffer createBuffer(VkDeviceSize size, VkBufferUsageFlags usage, VmaMemoryUsage vmaUsage)
{
VkBuffer buffer;
VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufferInfo.size = size;
bufferInfo.usage = usage;
bufferInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = vmaUsage;
allocCreateInfo.flags = 0;
VmaAllocationInfo allocInfo;
if (vmaCreateBuffer(m_allocator, &bufferInfo, &allocCreateInfo, &buffer, &m_allocation, &allocInfo) != VK_SUCCESS)
{
throw std::runtime_error("failed to create Buffer!");
}
return buffer;
}
This is how I transfer my buffer from the staging buffer:
copyBuffer(stagingVertexBuffer, verticesBuffer, vertexBufferSize);
void Base::copyBuffer(VkBuffer srcBuffer, VkBuffer dstBuffer, VkDeviceSize size)
{
VkCommandBuffer commandBuffer = beginSingleTimeCommands();
VkBufferCopy copyRegion = {};
copyRegion.dstOffset = 0;
copyRegion.srcOffset = 0;
copyRegion.size = size;
vkCmdCopyBuffer(commandBuffer, srcBuffer, dstBuffer, 1, &copyRegion);
endSingleTimeCommands(commandBuffer);
}
I checked several times that the data is in the staging buffer and, after modifying the createBuffer process, also in the verticesBuffer. It is stored there correctly.
I found the mistake.
The mistake was here:
bindingDesciption = {
models.agent.bindingDescription,
InstanceData::getBindingDescription()
};
By the time the binding took place, models.agent.bindingDescription was not initialized yet. As a result, the VkVertexInputBindingDescription was faulty.
models.agent.bindingDescription was filled with the default values for VkVertexInputBindingDescription:
binding = 0
stride = 0
inputRate = VK_VERTEX_INPUT_RATE_VERTEX
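In code, the per-vertex binding has to be filled with the intended values (the same ones shown stand-alone earlier in the question) before bindingDesciption and the pipeline are built. A minimal sketch:
// fill the per-vertex binding before building bindingDesciption / the pipeline
models.agent.bindingDescription.binding = VERTEX_BIND_ID; // 0
models.agent.bindingDescription.stride = sizeof(vkf::Vertex);
models.agent.bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;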
Hello, I recently tried to learn DirectX 11, but my program does not draw anything.
The only thing I get is the window with the background color I have chosen.
I have divided my program into a library (engine) and a regular project.
The library contains a model class, a shader class and a DirectX init function.
The S3DData is just a struct containing all relevant objects, e.g. the swap chain etc.
static bool initDX(logfile* errorlog, S3DData *data){
D3D_FEATURE_LEVEL featureLevels[] = {
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0
};
UINT numFeatureLevels = 3;
D3D_FEATURE_LEVEL featureLevel = D3D_FEATURE_LEVEL_11_0;
HRESULT result = ERROR_SUCCESS;
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
//swapchain and device
bufferDesc.Height = data->WindowHeight;
bufferDesc.Width = data->WindowWidth;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.RefreshRate.Numerator = 60;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.OutputWindow = data->Handle;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.Windowed = data->Windowed;
swapChainDesc.BufferCount = 1;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_DISCARD;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.SampleDesc.Count = 1;
result = D3D11CreateDeviceAndSwapChain(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, NULL, NULL, NULL,
D3D11_SDK_VERSION, &swapChainDesc, &data->SwapChain, &data->Device, NULL, &data->DeviceContext);
if(FAILED(result)){
std::string error;
errorlog->write("failed to create swapchain or device:");
if(result == E_INVALIDARG)
error = "invalid argument";
else if(result == E_OUTOFMEMORY)
error = " no memory";
else if(result == DXGI_ERROR_MORE_DATA)
error = " more data needed for buffer";
else if(result == E_NOTIMPL)
error = " not implemented";
else if(result == DXGI_ERROR_INVALID_CALL)
error = " invalid call";
else
error = std::to_string((unsigned int)result);
errorlog->write(error);
return false;
}
//back buffer and rendertargetview
ID3D11Texture2D *backbuffer;
result = data->SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (void**)&backbuffer);
if(FAILED(result)){
errorlog->write("failed to get backbuffer");
return false;
}
result = data->Device->CreateRenderTargetView(backbuffer, NULL, &data->RenderTargetView);
if(FAILED(result)){
errorlog->write("failed to create render target view");
return false;
}
data->DeviceContext->OMSetRenderTargets(1, &data->RenderTargetView, nullptr);
backbuffer->Release();
ZeroMemory(&data->viewport, sizeof(D3D11_VIEWPORT));
data->viewport.Height = data->WindowHeight;
data->viewport.Width = data->WindowWidth;
data->viewport.TopLeftX = 0;
data->viewport.TopLeftY = 0;
data->DeviceContext->RSSetViewports(1, &data->viewport);
errorlog->write("directx success");
return true;
The function basically creates the device, swap chain and device context, and sets the render target and the viewport.
The second function is the shader init function:
bool shader::init(std::string vsFile, std::string psFile, S3DData * data){
std::ofstream output;
output.open("shaderErrorLog.txt", std::ios::binary);
_S3DData = data;
_pixelShader = nullptr;
_vertexShader = nullptr;
_layout = nullptr;
HRESULT result;
ID3D10Blob *errorMsg, *pixelShader, *vertexShader;;
unsigned int numElements;
errorMsg = 0;
pixelShader = 0;
vertexShader = 0;
result = D3DX11CompileFromFile(vsFile.c_str(), 0, 0, "VS", "vs_5_0", 0, 0, 0, &vertexShader, &errorMsg, 0);
if(FAILED(result)){
if(errorMsg != nullptr){
char *compilerErrors = (char*)errorMsg->GetBufferPointer();
unsigned int size = errorMsg->GetBufferSize();
output.write(compilerErrors, size);
}
else
{
std::string error ="failed to find file";
output.write(error.c_str(), error.size());
}
return false;
}
result = D3DX11CompileFromFile(psFile.c_str(), 0, 0, "PS", "ps_5_0", 0, 0, 0, &pixelShader, &errorMsg, 0);
if(FAILED(result)){
if(errorMsg){
char *compilerErrors = (char*)errorMsg->GetBufferPointer();
unsigned int size = errorMsg->GetBufferSize();
output.write(compilerErrors, size);
}
else
{
std::string noFileMsg = "file " +psFile +"not found";
output.write(noFileMsg.c_str(), noFileMsg.size());
}
return false;
}
result = _S3DData->Device->CreateVertexShader(vertexShader->GetBufferPointer(), vertexShader->GetBufferSize(), nullptr, &_vertexShader);
if(FAILED(result)){
return false;
}
result = _S3DData->Device->CreatePixelShader(pixelShader->GetBufferPointer(), pixelShader->GetBufferSize(), nullptr, &_pixelShader);
if(FAILED(result)){
return false;
}
//layout of vertex
//in case of color.fx position and color
D3D11_INPUT_ELEMENT_DESC layout[] ={
{"POSITION",0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
//get num of elements
numElements = 2;
result = _S3DData->Device->CreateInputLayout(layout, numElements, vertexShader->GetBufferPointer(), vertexShader->GetBufferSize(), &_layout);
if(FAILED(result))
return false;
vertexShader->Release();
vertexShader = 0;
pixelShader->Release();
pixelShader = 0;
std::string success = "shader init : success";
output.write(success.c_str() , success.size());
_S3DData->DeviceContext->IASetInputLayout(_layout);
_S3DData->DeviceContext->VSSetShader(_vertexShader, 0, 0);
_S3DData->DeviceContext->PSSetShader(_pixelShader, 0, 0);
return true;
And these are the members of the shader class:
ID3D11VertexShader *_vertexShader;
ID3D11PixelShader *_pixelShader;
ID3D11InputLayout *_layout;
S3DData *_S3DData;
This function creates the shaders, and since I only have one shader for now, it also sets the shaders and the input layout.
The last function is the model init function:
bool model::init(S3DData *data){
_S3DData = data;
HRESULT result;
vertex *vertexBuffer;
unsigned long* indexBuffer;
D3D11_BUFFER_DESC indexDesc, vertexDesc;
D3D11_SUBRESOURCE_DATA indexData, vertexData;
//create buffers
_vertexCount = 3;
_indexCount = 3;
vertexBuffer = new vertex[_vertexCount];
if(!vertexBuffer)return false;
indexBuffer = new unsigned long[_indexCount];
if(!indexBuffer)return false;
//fill buffers
vertexBuffer[0] = vertex( 0.0f, 1.0f, 1.0f);
vertexBuffer[0] = vertex( 1.0f, -1.0f, 1.0f);
vertexBuffer[0] = vertex( -1.0f, -1.0f, 1.0f);
indexBuffer[0] = 0;
indexBuffer[1] = 1;
indexBuffer[2] = 2;
//bufferDesc
vertexDesc.Usage = D3D11_USAGE_DEFAULT;
vertexDesc.ByteWidth = sizeof(vertex) * _vertexCount;
vertexDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertexDesc.CPUAccessFlags = 0;
vertexDesc.MiscFlags = 0;
vertexDesc.StructureByteStride = 0;
//set subressource data
vertexData.pSysMem = vertexBuffer;
vertexData.SysMemPitch = 0;
vertexData.SysMemSlicePitch = 0;
result = _S3DData->Device->CreateBuffer(&vertexDesc, &vertexData, &_vertex);
if(FAILED(result))return false;
indexDesc.ByteWidth = sizeof(unsigned long) * _indexCount;
indexDesc.Usage = D3D11_USAGE_DEFAULT;
indexDesc.BindFlags = D3D11_BIND_INDEX_BUFFER;
indexDesc.MiscFlags = 0;
indexDesc.CPUAccessFlags = 0;
indexDesc.StructureByteStride = 0;
//set subressource
indexData.pSysMem = indexBuffer;
indexData.SysMemPitch = 0;
indexData.SysMemSlicePitch = 0;
result = _S3DData->Device->CreateBuffer(&indexDesc, &indexData, &_index);
if(FAILED(result))return false;
delete []indexBuffer;
indexBuffer = nullptr;
delete []vertexBuffer;
vertexBuffer = nullptr;
The vertex struct:
struct vertex{
XMFLOAT3 pos;
vertex(){}
vertex(float x, float y, float z):pos(x, y, z){
}
So this function only creates the buffers.
In the render function, the remaining variables are set:
void model::render(shader *Shader){
unsigned int stride = sizeof(vertex);
unsigned int offset = 0;
_S3DData->DeviceContext->IASetVertexBuffers(0, 1, &_vertex, &stride, &offset);
_S3DData->DeviceContext->IASetIndexBuffer(_index, DXGI_FORMAT_R32_UINT, 0);
//set form of vertex: triangles
_S3DData->DeviceContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
_S3DData->DeviceContext->DrawIndexed(_indexCount, 0, 0);
}
EDIT:
Here is the shader code you requested.
Vertex Shader:
struct VSout{
float4 position :SV_POSITION;
};
VSout VS(float4 position:POSITION){
VSout output;
output.position = position;
return output;
}
Pixel Shader:
float4 PS() :SV_TARGET{
float4 newColor = float4(1.0f, 1.0f, 0.0f, 1.0f);
return newColor;
}
Here is a screenshot of the debugger: on the left you have all the draw calls etc., and in the middle you can see the vertex buffer.
Thanks for your help in advance.
Looking at the debugger image you posted, the 2nd and 3rd vertices are all 0. This means you didn't fill your vertex buffer properly.
Looking at your code, when you fill your vertex buffer, you're only setting index 0. So your code looks like this:
vertexBuffer[0] = vertex( 0.0f, 1.0f, 1.0f);
vertexBuffer[0] = vertex( 1.0f, -1.0f, 1.0f);
vertexBuffer[0] = vertex( -1.0f, -1.0f, 1.0f);
And it should look like this:
vertexBuffer[0] = vertex( 0.0f, 1.0f, 1.0f);
vertexBuffer[1] = vertex( 1.0f, -1.0f, 1.0f);
vertexBuffer[2] = vertex( -1.0f, -1.0f, 1.0f);
I have a scene that I load from an .obj file using the assimp library. I followed a tutorial in order to do it. It works, but the textures have different colors compared to the original, and they all seem to be flipped top to bottom. The images are all TGA. (I would also like to get rid of all the aliasing.)
Here is the result,
and the curtain should be red.
I load the textures this way using FreeImage:
bool Texture::Load(){
FIBITMAP* bitmap = FreeImage_Load(
FreeImage_GetFileType(m_fileName.c_str(), 0),
m_fileName.c_str());
FIBITMAP *pImage = FreeImage_ConvertTo32Bits(bitmap);
int nWidth = FreeImage_GetWidth(pImage);
int nHeight = FreeImage_GetHeight(pImage);
glGenTextures(1, &m_textureObj);
glBindTexture(m_textureTarget, m_textureObj);
glTexImage2D(m_textureTarget, 0, GL_RGBA, nWidth, nHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, (void*)FreeImage_GetBits(pImage));
glTexParameterf(m_textureTarget, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(m_textureTarget, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
FreeImage_Unload(pImage);
return true;
}
The scene is loaded using assimp and processed this way:
void Mesh::InitMesh(const aiMesh* paiMesh,
vector<Vector3f>& Positions,
vector<Vector3f>& Normals,
vector<Vector2f>& TexCoords,
vector<unsigned int>& Indices){
const aiVector3D Zero3D(0.0f, 0.0f, 0.0f);
aiMatrix4x4 Scale(0.3f, 0.0f, 0.0f, 0.0f,
0.0f, 0.3f, 0.0f, 0.0f,
0.0f, 0.0f, 0.3f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
// Populate the vertex attribute vectors
for (unsigned int i = 0; i < paiMesh->mNumVertices; i++) {
if (paiMesh->mVertices[i].x < lowX) lowX = paiMesh->mVertices[i].x;
if (paiMesh->mVertices[i].x > highX) highX = paiMesh->mVertices[i].x;
if (paiMesh->mVertices[i].y < lowY) lowY = paiMesh->mVertices[i].y;
if (paiMesh->mVertices[i].y > highY) highY = paiMesh->mVertices[i].y;
if (paiMesh->mVertices[i].z < lowZ) lowZ = paiMesh->mVertices[i].z;
if (paiMesh->mVertices[i].z > highZ) highZ = paiMesh->mVertices[i].z;
//paiMesh->mVertices[i] = Scale*paiMesh->mVertices[i];
const aiVector3D* pPos = &(paiMesh->mVertices[i]);
const aiVector3D* pNormal = &(paiMesh->mNormals[i]);
const aiVector3D* pTexCoord = paiMesh->HasTextureCoords(0) ? &(paiMesh->mTextureCoords[0][i]) : &Zero3D;
Positions.push_back(Vector3f(pPos->x, pPos->y, pPos->z));
Normals.push_back(Vector3f(pNormal->x, pNormal->y, pNormal->z));
TexCoords.push_back(Vector2f(pTexCoord->x, pTexCoord->y));
}
bbox[0] = Vector3f(abs(lowX), abs(lowY), abs(lowZ));
bbox[1] = Vector3f(abs(highX), abs(highY), abs(highZ));
// Populate the index buffer
for (unsigned int i = 0; i < paiMesh->mNumFaces; i++) {
const aiFace& Face = paiMesh->mFaces[i];
assert(Face.mNumIndices == 3);
Indices.push_back(Face.mIndices[0]);
Indices.push_back(Face.mIndices[1]);
Indices.push_back(Face.mIndices[2]);
}
}
And this is how I initialise the textures:
bool Mesh::InitMaterials(const aiScene* pScene, const string& Filename){
// Extract the directory part from the file name
string::size_type SlashIndex = Filename.find_last_of("/");
string Dir;
if (SlashIndex == string::npos) {
Dir = ".";
}
else if (SlashIndex == 0) {
Dir = "/";
}
else {
Dir = Filename.substr(0, SlashIndex);
}
bool Ret = true;
// Initialize the materials
for (unsigned int i = 0; i < pScene->mNumMaterials; i++) {
const aiMaterial* pMaterial = pScene->mMaterials[i];
m_Textures[i] = NULL;
if (true || pMaterial->GetTextureCount(aiTextureType_DIFFUSE) > 0) {
aiString Path;
if (pMaterial->GetTexture(aiTextureType_DIFFUSE, 0, &Path, NULL, NULL, NULL, NULL, NULL) == AI_SUCCESS) {
string FullPath = Dir + "/" + Path.data;
m_Textures[i] = new Texture(GL_TEXTURE_2D, FullPath.c_str());
if (!m_Textures[i]->Load()) {
printf("Error loading texture '%s'\n", FullPath.c_str());
delete m_Textures[i];
m_Textures[i] = NULL;
Ret = false;
}
else {
printf("%d - loaded texture '%s'\n", i, FullPath.c_str());
}
}
}
}
return Ret;
}
In the end I render everything in this way
void Mesh::Render()
{
glBindVertexArray(m_VAO);
glActiveTexture(GL_TEXTURE0);
GLenum oldObj = 0;
if (m_Textures[m_Entries[0].MaterialIndex]){
m_Textures[m_Entries[0].MaterialIndex]->Bind(GL_TEXTURE0);
oldObj = m_Textures[m_Entries[0].MaterialIndex]->m_textureObj;
}
vector<GLsizei> numIdx;
vector<GLint> baseVc;
vector<void*> idx;
unsigned int drawCount = 0;
for (unsigned int i = 0; i < m_Entries.size(); i++) {
const unsigned int MaterialIndex = m_Entries[i].MaterialIndex;
assert(MaterialIndex < m_Textures.size());
drawCount++;
numIdx.push_back(m_Entries[i].NumIndices);
baseVc.push_back(m_Entries[i].BaseVertex);
idx.push_back((void*)(sizeof(unsigned int) * m_Entries[i].BaseIndex));
if (i == m_Entries.size() - 1){
glDrawElementsBaseVertex(GL_TRIANGLES,
m_Entries[i].NumIndices,
GL_UNSIGNED_INT,
(void*)(sizeof(unsigned int) * m_Entries[i].BaseIndex),
m_Entries[i].BaseVertex);
}else
if (m_Textures[m_Entries[i + 1].MaterialIndex] && m_Textures[m_Entries[i+1].MaterialIndex]->m_textureObj != oldObj) {
glMultiDrawElementsBaseVertex(GL_TRIANGLES,
&numIdx[0],
GL_UNSIGNED_INT,
&idx[0],
drawCount,
&baseVc[0]);
numIdx.clear();
baseVc.clear();
idx.clear();
m_Textures[m_Entries[i + 1].MaterialIndex]->Bind(GL_TEXTURE0);
oldObj = m_Textures[m_Entries[i + 1].MaterialIndex]->m_textureObj;
drawCount = 0;
}else if (!m_Textures[m_Entries[i].MaterialIndex]){
glMultiDrawElementsBaseVertex(GL_TRIANGLES,
&numIdx[0],
GL_UNSIGNED_INT,
&idx[0],
drawCount,
&baseVc[0]);
}
}
// Make sure the VAO is not changed from the outside
glBindVertexArray(0);
}
Sorry for the large amount of code, but since I don't know where the error is, I posted the entire thing. It's all commented.
I had that kind of issue while trying to reduce the size of the textures; I was putting RGBA in the wrong position. You are using bitmap textures, which means they don't store an alpha channel. Try to put them in GL_RGB mode (you are sending them as GL_RGBA).
So it turns out that I had to remove flippedUV, and I had to use GL_RGBA as the internal format and GL_BGRA for the pixel data format.
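In Texture::Load that corresponds to a call along these lines (a sketch; FreeImage's 32-bit bitmaps are BGRA-ordered on little-endian machines):
// GL_RGBA as internal format, GL_BGRA as the format of the FreeImage pixel data
glTexImage2D(m_textureTarget, 0, GL_RGBA, nWidth, nHeight, 0, GL_BGRA, GL_UNSIGNED_BYTE, (void*)FreeImage_GetBits(pImage));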