I currently have a robotics project which uses many (16) IMUs, specifically the MPU9250, running under SPI.
As a reduced example, here are six of the sensors using the Bolder Flight library:
int cs[6] = {21, 25, 26, 27, 32, 14}; //chipselects
MPU9250 IMU0(SPI, 21); // Header P5
MPU9250 IMU1(SPI, 25); // Header P6
MPU9250 IMU2(SPI, 26); // Header P7
MPU9250 IMU3(SPI, 27); // Header P9
MPU9250 IMU4(SPI, 32); // Header P10
MPU9250 IMU5(SPI, 14); // Header P11
To use these sensors, they all have to be calibrated, with magnetic hard- and soft-iron offsets applied live during use; on top of that, I also have to apply gyroscope and accelerometer calibration algorithms. This means that, for each sensor, I have to read 9 different data points from the IMU and apply some maths, so I set up some arrays for storing in-between values, final values, and offsets:
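In matrix form, the per-sensor magnetometer correction these arrays implement is m_cal = A_soft · (m_raw − b_hard), where b_hard is the 3-element hard-iron offset vector and A_soft is the 3×3 soft-iron matrix; the gyro correction is a plain per-axis offset subtraction.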
// Offsets applied to raw x/y/z mag values
float mag_offsets[6][3] = {
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 10.44F, 34.76F, -49.86F },
{ 8.62F, 20.41F, -12.65F },
{ -3.05F, 19.75F, -8.55F },
};
// Soft iron error compensation matrix
float mag_softiron_matrix[6][3][3] = {
// IMUs 27, 14, 32
{{ 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }},
{{ 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }},
{{ 0, 0, 0 }, { 0, 0, 0 }, { 0, 0, 0 }},
// IMUs 21, 25, 26
{{ 1.036F, 0.017F, -0.001F }, { 0.017F, 0.954F, -0.028F }, { -0.001F, 0.028F, 1.013F }},
{{ 1.031F, 0.013F, -0.024F }, { 0.013F, 0.897F, 0.054F }, { -0.024F, 0.054F, 1.085F }},
{{ 1.057F, 0.034F, 0.017F }, { 0.034F, 0.967F, 0.038F }, { 0.017F, 0.038F, 0.981F }},
};
float mag_field_strength[3] = {38.52F, 37.24F, 38.58F};
// Offsets applied to compensate for gyro zero-drift error for x/y/z, sensor dependent
float gyro_zero_offsets[6][3] = {
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
};
// Used for calculating 'in between values' prior to passing to final mag array, sensor dependent
float deltamag[6][3] = {
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
};
// The following array names should stay constant; they hold the final values handed to the Madgwick filters, sensor agnostic.
float gyro[6][3] = {
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
};
float accel[6][3] = {
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
};
float mag[6][3] = {
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
{ 0.0F, 0.0F, 0.0F },
};
Then in the loop itself I call each object and get the sensor readings:
void loop(){
IMU0.readSensor();
IMU1.readSensor();
IMU2.readSensor();
IMU3.readSensor();
IMU4.readSensor();
IMU5.readSensor();
// update accel, gyro, mag arrays
float getAccel[6][3] = {
{ IMU0.getAccelX_mss(), IMU0.getAccelY_mss(), IMU0.getAccelZ_mss() },
{ IMU1.getAccelX_mss(), IMU1.getAccelY_mss(), IMU1.getAccelZ_mss() },
{ IMU2.getAccelX_mss(), IMU2.getAccelY_mss(), IMU2.getAccelZ_mss() },
{ IMU3.getAccelX_mss(), IMU3.getAccelY_mss(), IMU3.getAccelZ_mss() },
{ IMU4.getAccelX_mss(), IMU4.getAccelY_mss(), IMU4.getAccelZ_mss() },
{ IMU5.getAccelX_mss(), IMU5.getAccelY_mss(), IMU5.getAccelZ_mss() },
};
float getGyro[6][3] = {
{ IMU0.getGyroX_rads(), IMU0.getGyroY_rads(), IMU0.getGyroZ_rads() },
{ IMU1.getGyroX_rads(), IMU1.getGyroY_rads(), IMU1.getGyroZ_rads() },
{ IMU2.getGyroX_rads(), IMU2.getGyroY_rads(), IMU2.getGyroZ_rads() },
{ IMU3.getGyroX_rads(), IMU3.getGyroY_rads(), IMU3.getGyroZ_rads() },
{ IMU4.getGyroX_rads(), IMU4.getGyroY_rads(), IMU4.getGyroZ_rads() },
{ IMU5.getGyroX_rads(), IMU5.getGyroY_rads(), IMU5.getGyroZ_rads() },
};
float getMag[6][3] = {
{ IMU0.getMagX_uT(), IMU0.getMagY_uT(), IMU0.getMagZ_uT() },
{ IMU1.getMagX_uT(), IMU1.getMagY_uT(), IMU1.getMagZ_uT() },
{ IMU2.getMagX_uT(), IMU2.getMagY_uT(), IMU2.getMagZ_uT() },
{ IMU3.getMagX_uT(), IMU3.getMagY_uT(), IMU3.getMagZ_uT() },
{ IMU4.getMagX_uT(), IMU4.getMagY_uT(), IMU4.getMagZ_uT() },
{ IMU5.getMagX_uT(), IMU5.getMagY_uT(), IMU5.getMagZ_uT() },
};
// Apply magnetic hard-iron offsets
for (int j = 0; j < 6; j++) {
for (int i = 0; i < 3; i++) {
deltamag[j][i] = getMag[j][i] - mag_offsets[j][i];
}
}
// Apply magnetic soft-iron compensation (3x3 matrix multiply per sensor)
for (int j = 0; j < 6; j++) {
for (int i = 0; i < 3; i++) {
mag[j][i] = deltamag[j][0] * mag_softiron_matrix[j][i][0] + deltamag[j][1] * mag_softiron_matrix[j][i][1] + deltamag[j][2] * mag_softiron_matrix[j][i][2];
}
}
// Pass the accelerometer readings through to the final array (no per-axis calibration yet)
for (int j = 0; j < 6; j++) {
for (int i = 0; i < 3; i++) {
accel[j][i] = getAccel[j][i];
}
}
// Apply gyroscope offsets
for (int j = 0; j < 6; j++) {
for (int i = 0; i < 3; i++) {
gyro[j][i] = getGyro[j][i] - gyro_zero_offsets[j][i];
}
}
// Update Madgwick filters
filter0.update(gyro[0][0], gyro[0][1], gyro[0][2], accel[0][0], accel[0][1], accel[0][2], mag[0][0], mag[0][1], -1 * mag[0][2]);
filter1.update(gyro[1][0], gyro[1][1], gyro[1][2], accel[1][0], accel[1][1], accel[1][2], mag[1][0], mag[1][1], -1 * mag[1][2]);
filter2.update(gyro[2][0], gyro[2][1], gyro[2][2], accel[2][0], accel[2][1], accel[2][2], mag[2][0], mag[2][1], -1 * mag[2][2]);
filter3.update(gyro[3][0], gyro[3][1], gyro[3][2], accel[3][0], accel[3][1], accel[3][2], mag[3][0], mag[3][1], -1 * mag[3][2]);
filter4.update(gyro[4][0], gyro[4][1], gyro[4][2], accel[4][0], accel[4][1], accel[4][2], mag[4][0], mag[4][1], -1 * mag[4][2]);
filter5.update(gyro[5][0], gyro[5][1], gyro[5][2], accel[5][0], accel[5][1], accel[5][2], mag[5][0], mag[5][1], -1 * mag[5][2]);
// Get all Euler angle rotations around {X,Y,Z}, i.e. {gamma, delta, epsilon}
float eulerAngles[6][3] = {
{filter0.getRoll(), filter0.getPitch(), filter0.getYaw()},
{filter1.getRoll(), filter1.getPitch(), filter1.getYaw()},
{filter2.getRoll(), filter2.getPitch(), filter2.getYaw()},
{filter3.getRoll(), filter3.getPitch(), filter3.getYaw()},
{filter4.getRoll(), filter4.getPitch(), filter4.getYaw()},
{filter5.getRoll(), filter5.getPitch(), filter5.getYaw()},
};
Serial.print(eulerAngles[0][0]); Serial.print('\t');
Serial.print(eulerAngles[0][1]); Serial.print('\t');
Serial.println(eulerAngles[0][2]);
}
Though the code seems to work the way I expected, I am confident this is the wrong method to store this data, namely in the getAccel, getGyro, and getMag arrays, or to call it the way eulerAngles does.
My hunch comes from initial testing: some of the sensor data I receive has an oscillating error applied to it, which makes me think I'm receiving junk data from memory somewhere.
I would have used a for loop, but since each object name is individual and has no index, I am unsure of the best practice, or of the fastest way to call and work with such a large data set. I have found a similar question, though I'm unfortunately too dumb to apply it to my situation.
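As a minimal sketch, assuming only the Bolder Flight calls already shown above, the individually named objects can be collected into an array of pointers so that a for loop becomes possible:
// Sketch only: collect the named sensor objects into a pointer array so they
// can be indexed in a loop.
MPU9250* imus[6] = { &IMU0, &IMU1, &IMU2, &IMU3, &IMU4, &IMU5 };
// The per-sensor reads then collapse from six copies into one loop:
for (int j = 0; j < 6; j++) {
imus[j]->readSensor();
accel[j][0] = imus[j]->getAccelX_mss();
accel[j][1] = imus[j]->getAccelY_mss();
accel[j][2] = imus[j]->getAccelZ_mss();
}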
So the question is: what is the proper method to call and store so many objects (and their data) in arrays for further calculations? I would like to avoid having over a hundred variables (when using all 16 IMUs) plus the in-between variables needed to carry out all the appropriate maths. My apologies for the probably terribly written code; my C++/Wiring is not the best.
Research object-oriented programming. Apply encapsulation. Group data by object, not by similarity, just like you think about it.
Use standard library types such as std::array. Save memory and allow optimization: apply const whenever possible and constexpr where possible. Research coding guidelines and style guides, like https://isocpp.github.io/CppCoreGuidelines/CppCoreGuidelines#S-philosophy and https://google.github.io/styleguide/cppguide.html .
Let's say in pseudocode, you could encapsulate all variables within one object and use member function to calculate the relevant stuff:
class MyStuff { // pick a more meaningful name
// maybe be more verbose
using axisvals = std::array<float, 3>;
private:
// apply constness to save RAM memory
static constexpr std::array<float, 3> mag_field_strength = { 38.52F, 37.24F, 38.58F };
MPU9250 mpu;
FILTER filter;
const std::array<float, 3> mag_offsets;
const std::array<std::array<float, 3>, 3> mag_softiron_matrix;
std::array<float, 3> gyros{}; // maybe some internal state?
public:
MyStuff(int gpionum,
const std::array<float, 3>& mag_offsets,
const std::array<std::array<float, 3>, 3>& mag_softiron_matrix) :
mpu{SPI, gpionum},
filter{some, params, for, filter, constructor},
mag_offsets{mag_offsets},
mag_softiron_matrix{mag_softiron_matrix} {
}
void setup() {
// do some setup stuff
}
axisvals calculate_stuff() {
mpu.readSensor();
// use const as much as possible
const std::array<float, 3> gyro = {
something * mpu.getGyroX_rads(),
something * mpu.getGyroY_rads(),
something * mpu.getGyroZ_rads(),
};
// ...
filter.update(
gyro[0], gyro[1], gyro[2],
accel[0], accel[1], accel[2],
mag[0], mag[1], -1 * mag[2]);
// ...
return {filter.getRoll(), filter.getPitch(), filter.getYaw()};
}
};
std::array<MyStuff, 6> imus = {
{ 21, {10.44F, 34.76F, -49.86F}, {{1.036F, 0.017F, -0.001F}, {...}, {...}} }, // Header P5
{ 25, {...}, {{...}, {...}, {...}} }, // Header P6
// etc....
};
void setup() {
for (auto&& imu : imus) {
imu.setup();
}
}
void loop() {
for (auto&& imu : imus) {
const auto& vals = imu.calculate_stuff();
for (auto&& v : vals) {
Serial.print(v);
}
}
}
I'm learning DirectX 11 and I'm trying to draw two quads with a single function, but when I call this function it overrides the previous quad.
If you want to see the problem, here is a video: https://i.gyazo.com/b819ffc64975c1531434047b9b4a92f7.mp4
Here is the code (this is where I call the function to draw the quads):
void Application::ApplicationRun()
{
//wind.SetEventCallBack(std::bind(&Application::OnEvent, Instance, std::placeholders::_1));
Vertex v[] =
{
Vertex(-0.5f, -0.5f, 0.0f, 1.0f), // bottom left point - [0]
Vertex(-0.5f, 0.5f, 0.0f, 0.0f), // top left point - [1]
Vertex{ 0.0f, -0.5f, 1.0f, 1.0f}, // bottom right point - [2]
Vertex(0.0f, 0.5f, 1.0f, 0.0f), // top right point - [3]
};
Vertex vv[] =
{
Vertex(-0.1f, -0.1f, 0.0f, 0.0f), // bottom left point - [0]
Vertex(-0.1f, 0.1f, 0.0f, 0.0f), // top left point - [1]
Vertex{ 0.05f, -0.1f, 0.0f, 0.0f}, // bottom right point - [2]
Vertex(0.05f, 0.1f, 0.0f, 0.0f), // top right point - [3]
};
DWORD indices[] =
{
0,1,2,
1,2,3,
};
while (wind.PorcessMessage())
{
//
if (Armageddon::Application::GetInstance()->GetWindow()->GetNativeKeyBoard().KeyIsPressed(AG_KEY_A))
{
Log::GetLogger()->trace("A ");
wind.GetWindowGraphics()->DrawTriangle(v, ARRAYSIZE(v));
}
if (Armageddon::Application::GetInstance()->GetWindow()->GetNativeKeyBoard().KeyIsPressed(AG_KEY_B))
{
Log::GetLogger()->trace("B");
wind.GetWindowGraphics()->DrawTriangle(vv, ARRAYSIZE(vv));
}
}
}
Here is the DrawTriangle function:
void Armageddon::D3D_graphics::DrawTriangle(Vertex v[], int Vertexcount)
{
HRESULT hr = vertexBuffer.Initialize(this->device.Get(),this->device_context.Get() , v, Vertexcount);
if (FAILED(hr))
{
Armageddon::Log::GetLogger()->error("FAILED INITIALIZE VERTEX BUFFER ");
}
DWORD indices[] =
{
0,1,2,
1,2,3,
};
hr = this->indicesBuffer.Init(this->device.Get(), indices, ARRAYSIZE(indices));
hr = DirectX::CreateWICTextureFromFile(this->device.Get(), L"..\\TestApplication\\assets\\Textures\\tex.png",nullptr,textures.GetAddressOf());
if (FAILED(hr))
{
Armageddon::Log::GetLogger()->error("FAILED INITIALIZE WIC TEXTURE ");
}
}
Here is where I initialize the vertex and index buffers:
HRESULT Initialize(ID3D11Device* device , ID3D11DeviceContext* device_context, T* data, UINT numElements)
{
this->bufferSize = numElements;
this->stride = sizeof(T);
D3D11_BUFFER_DESC vertex_buffer_desc;
ZeroMemory(&vertex_buffer_desc, sizeof(vertex_buffer_desc));
vertex_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buffer_desc.ByteWidth = sizeof(Vertex) * numElements;
vertex_buffer_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buffer_desc.CPUAccessFlags = 0;
vertex_buffer_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA VertexBufferData;
ZeroMemory(&VertexBufferData, sizeof(VertexBufferData));
VertexBufferData.pSysMem = data;
HRESULT hr = device->CreateBuffer(&vertex_buffer_desc, &VertexBufferData, buffer.GetAddressOf());
UINT offset = 0;
device_context->IASetVertexBuffers(0, 1, buffer.GetAddressOf(), &stride, &offset);
return hr;
};
HRESULT Init(ID3D11Device* device, DWORD* data, UINT n_indices)
{
D3D11_SUBRESOURCE_DATA Indice_buffer_data;
this->buffer_size = n_indices;
D3D11_BUFFER_DESC Indice_buffer_desc;
ZeroMemory(&Indice_buffer_desc, sizeof(Indice_buffer_desc));
Indice_buffer_desc.Usage = D3D11_USAGE_DEFAULT;
Indice_buffer_desc.ByteWidth = sizeof(DWORD) * n_indices;
Indice_buffer_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
Indice_buffer_desc.CPUAccessFlags = 0;
Indice_buffer_desc.MiscFlags = 0;
ZeroMemory(&Indice_buffer_data, sizeof(Indice_buffer_data));
Indice_buffer_data.pSysMem = data;
HRESULT hr = device->CreateBuffer(&Indice_buffer_desc, &Indice_buffer_data, buffer.GetAddressOf());
return hr;
};
And here is where I draw the quads :
void Armageddon::D3D_graphics::RenderFrame()
{
float color[] = { 0.1f,0.1f,0.1f,1.0f };
ImGui_ImplDX11_NewFrame();
ImGui_ImplWin32_NewFrame();
ImGui::NewFrame();
if (show_demo_window)
ImGui::ShowDemoWindow(&show_demo_window);
// 2. Show a simple window that we create ourselves. We use a Begin/End pair to create a named window.
{
static float f = 0.0f;
static int counter = 0;
ImGui::Begin("Hello, world!"); // Create a window called "Hello, world!" and append into it.
ImGui::Text("This is some useful text."); // Display some text (you can use format strings too)
ImGui::Checkbox("Demo Window", &show_demo_window); // Edit bools storing our window open/close state
ImGui::Checkbox("Another Window", &show_another_window);
ImGui::SliderFloat("float", &f, 0.0f, 1.0f); // Edit 1 float using a slider from 0.0f to 1.0f
ImGui::ColorEdit3("clear color", (float*)&clear_color); // Edit 3 floats representing a color
if (ImGui::Button("Button")) // Buttons return true when clicked (most widgets return true when edited/activated)
counter++;
ImGui::SameLine();
ImGui::Text("counter = %d", counter);
ImGui::Text("Application average %.3f ms/frame (%.1f FPS)", 1000.0f / ImGui::GetIO().Framerate, ImGui::GetIO().Framerate);
ImGui::End();
}
// 3. Show another simple window.
if (show_another_window)
{
ImGui::Begin("Another Window", &show_another_window); // Pass a pointer to our bool variable (the window will have a closing button that will clear the bool when clicked)
ImGui::Text("Hello from another window!");
if (ImGui::Button("Close Me"))
show_another_window = false;
ImGui::End();
}
ImGui::Render();
this->device_context->OMSetRenderTargets(1, target_view.GetAddressOf(), this->depthStencilView.Get());
this->device_context->ClearRenderTargetView(this->target_view.Get(), color);
this->device_context->ClearDepthStencilView(this->depthStencilView.Get(), D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0);
// This is where all the rendering is done, after clearing the render target
this->device_context->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY::D3D_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
f += 0.1f;
ConstantBuffer.data.mat = DirectX::XMMatrixRotationRollPitchYaw(0.0f,0.0f,f);
ConstantBuffer.data.mat = DirectX::XMMatrixTranspose(ConstantBuffer.data.mat);
ConstantBuffer.data.Yoffset = f;
ConstantBuffer.data.Xoffset = 0;
// Armageddon::Log::GetLogger()->trace(ConstantBuffer.data.Yoffset);
if (!ConstantBuffer.ApplyChanges())
{
Armageddon::Log::GetLogger()->error("ERRO WHEN APPLYING CHANGES");
}
this->device_context->VSSetConstantBuffers(0, 1, ConstantBuffer.GetAdressOf());
/***********SHADER*******************************/
this->device_context->VSSetShader(vertexShader.GetShader(), NULL, 0);
this->device_context->PSSetShader(pixelShader.GetShader(), NULL, 0);
this->device_context->IASetInputLayout(this->vertexShader.GetInputLayout());
/***********Texture Sampler*******************************/
this->device_context->PSSetSamplers(0, 1, this->ColorSampler.GetAddressOf());
/***********DEPHT BUFFER*******************************/
this->device_context->OMSetDepthStencilState(this->depthStencilState.Get(), 0);
/***********RASTERIZER STATE*******************************/
this->device_context->RSSetState(this->rasterizerState.Get());
/*********** UPDATE THE CONSTANT BUFFERS *******************************/
UINT stride = sizeof(Vertex);
UINT offset = 0;
this->device_context->IASetIndexBuffer(indicesBuffer.Get(), DXGI_FORMAT_R32_UINT, 0);
//this->device_context->IASetVertexBuffers(0, 1, vertexBuffer.GetAddressOf(), &stride, &offset);
this->device_context->PSSetShaderResources(0, 1, this->textures.GetAddressOf());
this->device_context->DrawIndexed(indicesBuffer.GetSize(), 0,0);
ImGui_ImplDX11_RenderDrawData(ImGui::GetDrawData());
this->swapchain->Present(1,0);
}
I am trying to render a textured square, but it looks like the texture doesn't get interpolated like it should: it gets mirrored on each triangle of my square. The behavior is shown in the image below.
Note that I'm using this tutorial.
I don't have an idea where to start fixing my code. Also, when I try to translate my Image layout, I get this error: "Cannot submit cmd buffer using image 0x25 with layout VK_IMAGE_LAYOUT_UNDEFINED when first use is VK_IMAGE_LAYOUT_TRANSFER_DST_BIT."
Also, I get this warning when submitting a drawing command buffer for the first time: "Cannot submit cmd buffer using image 0x25 with layout VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL when first use is VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL."
Can this warning cause my issue? Also, here are my vertices and their texture coordinates.
vertices->setVertices({
{{-1.0f, -1.0f}, {1.0f, 0.0f, 0.0f}, {1.0f, 0.0f}},
{{1.0f, -1.0f}, {0.0f, 1.0f, 0.0f}, {0.0f, 0.0f}},
{{1.0f, 1.0f}, {0.0f, 0.0f, 1.0f}, {0.0f, 1.0f}},
{{-1.0f, 1.0f}, {1.0f, 1.0f, 1.0f}, {1.0f, 1.0f}}
});
vertices->setIndices({ 0, 1, 2, 2, 3, 0 });
Update:
Here is my image transition code:
void Util::transitionImageLayout(VkImage *image, VkFormat format,
VkImageLayout oldLayout, VkImageLayout newLayout,
VkCommandBuffer recordingBuffer) {
VkImageMemoryBarrier barrier = {};
VkImageSubresourceRange subresourceRange = {};
VkPipelineStageFlags sourceStage = {};
VkPipelineStageFlags dstStage = {};
if (oldLayout == VK_IMAGE_LAYOUT_UNDEFINED && newLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) {
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
sourceStage = VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
dstStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
}
else if (oldLayout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && newLayout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
sourceStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
dstStage = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
}
else {
throw std::invalid_argument("Layout transition not supported.");
}
subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresourceRange.baseArrayLayer = 0;
subresourceRange.baseMipLevel = 0;
subresourceRange.layerCount = 1;
subresourceRange.levelCount = 1;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.image = *image;
barrier.oldLayout = oldLayout;
barrier.newLayout = newLayout;
barrier.subresourceRange = subresourceRange;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
vkCmdPipelineBarrier(recordingBuffer, sourceStage, dstStage, 0, 0, nullptr, 0, nullptr, 0, &barrier);
}
Here is my copy buffer to image code:
void Util::copyBufferToimage(VkCommandBuffer cmdBuffer, VkBuffer buffer,
VkImage *image, uint32_t width, uint32_t height) {
VkBufferImageCopy region{};
VkImageSubresourceLayers subresouce{};
subresouce.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresouce.baseArrayLayer = 0;
subresouce.mipLevel = 0;
subresouce.layerCount = 1;
region.bufferImageHeight = 0;
region.bufferOffset = 0;
region.bufferRowLength = 0;
region.imageOffset = { 0, 0, 0 };
region.imageExtent = { width, height, 1 };
region.imageSubresource = subresouce;
vkCmdCopyBufferToImage(cmdBuffer, buffer, *image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
}
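For reference, a sketch of the order these two helpers are normally recorded in (the handle names cmdBuffer, stagingBuffer, textureImage, format, texWidth and texHeight are hypothetical): transition to TRANSFER_DST_OPTIMAL, copy, then transition to SHADER_READ_ONLY_OPTIMAL, all recorded into a command buffer that is submitted and has finished before any draw samples the image. The validation messages above suggest the submissions are happening in a different order than the recordings assume.
// Hypothetical usage sketch, matching the two signatures above:
Util::transitionImageLayout(&textureImage, format,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, cmdBuffer);
Util::copyBufferToimage(cmdBuffer, stagingBuffer, &textureImage, texWidth, texHeight);
Util::transitionImageLayout(&textureImage, format,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, cmdBuffer);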
Notes:
I have tried running the project from the tutorial's git repo, and it works fine. They get no warnings.
The problem was in my vertex struct code, which I didn't provide. The format of the texture coordinates was wrong: it should be R32G32, but I had set it to R32G32B32A32.
static std::array<VkVertexInputAttributeDescription, 3> getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, position);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
attributeDescriptions[2].binding = 0;
attributeDescriptions[2].location = 2;
attributeDescriptions[2].format = VK_FORMAT_R32G32B32A32_SFLOAT; // This is the error: texCoord is a glm::vec2, so the format must describe two 32-bit floats, not four
attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
return attributeDescriptions;
}
So the attribute description format for the texture coordinates should be
attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
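For completeness, a hypothetical reconstruction of the vertex struct these attribute descriptions assume (the original struct was not posted); the point is that texCoord is a glm::vec2, so its attribute format must describe exactly two 32-bit floats:
#include <glm/glm.hpp>
// Hypothetical reconstruction, matching the bindings and offsets above:
struct Vertex {
glm::vec2 position; // location 0: VK_FORMAT_R32G32_SFLOAT
glm::vec3 color;    // location 1: VK_FORMAT_R32G32B32_SFLOAT
glm::vec2 texCoord; // location 2: VK_FORMAT_R32G32_SFLOAT
};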
Now my polygon looks like this:
This is a question for anybody experienced with the Oculus Rift C++ SDK.
SDK Version: ovr_sdk_win_1.20.0_public
IDE: VS2013
I am trying to resolve a depth buffer problem in my code for a simplified 3D engine for the Oculus Rift DK2.
The OculusRoomTiny example supplied with the SDK is very complex, as it is designed with versatility in mind. So I've taken the SuperMinimal sample code and used that as a basis for my engine. The SuperMinimal sample did not support multi-colour vertices or depth buffers, so my code adds both.
I've had no problem creating depth buffers using the Microsoft DirectX 11 libraries in PC apps, so I think it's an Oculus-specific challenge. Note: the OculusRoomTiny example's depth buffer works fine, but it is encapsulated in classes too complex for my code.
In my code the cube rendered as expected, but with the depth buffer included, only the world background colour renders.
I ripped the depth buffer code from the OculusRoomTiny_Advanced demo and am sure I have integrated it religiously.
I've posted my code for comment. Please note, it is super minimal and does not clean up the COM objects.
Any advice would be welcome as I've been playing with the code now for 6 weeks!
main.cpp
#include "d3d11.h"
#include "d3dcompiler.h"
#include "OVR_CAPI_D3D.h"
#include "DirectXMath.h"
#include "models.h"
using namespace DirectX;
#pragma comment(lib, "d3dcompiler.lib")
#pragma comment(lib, "dxgi.lib")
#pragma comment(lib, "d3d11.lib")
void CreateSampleModel(ID3D11Device * Device, ID3D11DeviceContext * Context);
void RenderSampleModel(XMMATRIX * viewProj, ID3D11Device * Device, ID3D11DeviceContext * Context);
ID3D11Buffer * IndexBuffer;
ID3D11RenderTargetView * eyeRenderTexRtv[2][3];
ID3D11DepthStencilView *zbuffer[2]; // containing one for each eye
ovrLayerEyeFov ld = { { ovrLayerType_EyeFov } }; // Only using one layer. Update this to an array if using several.
ovrSession session;
ID3D11Device * Device;
ID3D11DeviceContext * Context;
void CreateDepthBufferForBothEyes(int eye)
{
// Create the Depth Buffer Texture
D3D11_TEXTURE2D_DESC texd;
ZeroMemory(&texd, sizeof(texd));
texd.Width = ld.Viewport[eye].Size.w;
texd.Height = ld.Viewport[eye].Size.h;
texd.ArraySize = 1;
texd.MipLevels = 1;
texd.SampleDesc.Count = 1; // This matches the RTV
texd.Format = DXGI_FORMAT_D32_FLOAT;
texd.BindFlags = D3D11_BIND_DEPTH_STENCIL;
ID3D11Texture2D *pDepthBuffer;
Device->CreateTexture2D(&texd, NULL, &pDepthBuffer);
// Describe specific properties of the Depth Stencil Buffer
D3D11_DEPTH_STENCIL_VIEW_DESC dsvd;
ZeroMemory(&dsvd, sizeof(dsvd));
dsvd.Format = DXGI_FORMAT_D32_FLOAT; // Make the same as the texture format
dsvd.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2D;
Device->CreateDepthStencilView(pDepthBuffer, &dsvd, &zbuffer[eye]);
pDepthBuffer->Release();
}
//-------------------------------------------------------------------------------------------------
int WINAPI wWinMain(HINSTANCE hInstance, HINSTANCE, LPWSTR, int)
{
// Init Rift and device
ovr_Initialize(0);
ovrGraphicsLuid luid;
ovr_Create(&session, &luid);
IDXGIFactory * DXGIFactory; CreateDXGIFactory1(__uuidof(IDXGIFactory), (void**)(&DXGIFactory));
IDXGIAdapter * DXGIAdapter; DXGIFactory->EnumAdapters(0, &DXGIAdapter);
D3D11CreateDevice(DXGIAdapter, D3D_DRIVER_TYPE_UNKNOWN, 0, 0, 0, 0, D3D11_SDK_VERSION, &Device, 0, &Context);
// Create eye render buffers
for (int eye = 0; eye < 2; eye++)
{
ld.Fov[eye] = ovr_GetHmdDesc(session).DefaultEyeFov[eye];
ld.Viewport[eye].Size = ovr_GetFovTextureSize(session, (ovrEyeType)eye, ld.Fov[eye], 1.0f);
ovrTextureSwapChainDesc desc = {};
desc.Type = ovrTexture_2D;
desc.ArraySize = 1;
desc.Format = OVR_FORMAT_R8G8B8A8_UNORM_SRGB;
desc.Width = ld.Viewport[eye].Size.w;
desc.Height = ld.Viewport[eye].Size.h;
desc.MipLevels = 1;
desc.SampleCount = 1;
desc.StaticImage = ovrFalse;
desc.MiscFlags = ovrTextureMisc_DX_Typeless;
desc.BindFlags = ovrTextureBind_DX_RenderTarget;
ovr_CreateTextureSwapChainDX(session, Device, &desc, &ld.ColorTexture[eye]);
int textureCount = 0; ovr_GetTextureSwapChainLength(session, ld.ColorTexture[eye], &textureCount);
for (int j = 0; j < textureCount; j++) // Creates 3 backbuffers for each eye
{
ID3D11Texture2D* tex; ovr_GetTextureSwapChainBufferDX(session, ld.ColorTexture[eye], j, IID_PPV_ARGS(&tex));
D3D11_RENDER_TARGET_VIEW_DESC rtvd = { DXGI_FORMAT_R8G8B8A8_UNORM, D3D11_RTV_DIMENSION_TEXTURE2D };
Device->CreateRenderTargetView(tex, &rtvd, &eyeRenderTexRtv[eye][j]);
}
CreateDepthBufferForBothEyes(eye);
}
// Create sample model to be rendered in VR
CreateSampleModel(Device,Context);
// Loop for some frames, then terminate
float camX = 0.0f;
float camY = 0.0f;
float camZ = 0.0f;
for (long long frameIndex = 0; frameIndex < 1000;)
{
if (GetKeyState(VK_LEFT) & 0x8000)
camX -= 0.001f;
if (GetKeyState(VK_RIGHT) & 0x8000)
camX += 0.001f;
if (GetKeyState(VK_UP) & 0x8000)
camY += 0.001f;
if (GetKeyState(VK_DOWN) & 0x8000)
camY -= 0.001f;
if (GetKeyState(VK_NEXT) & 0x8000)
camZ += 0.001f;
if (GetKeyState(VK_PRIOR) & 0x8000)
camZ -= 0.001f;
// Get pose using a default IPD
ovrPosef HmdToEyePose[2] = {{{0,0,0,1}, {-0.032f, 0, 0}},
{{0,0,0,1}, {+0.032f, 0, 0}}};
ovrPosef pose[2]; ovr_GetEyePoses(session, 0, ovrTrue, HmdToEyePose, pose, &ld.SensorSampleTime);
//for (int eye = 0; eye < 2; eye++)
// ld.RenderPose[eye] = pose[eye]; // Update the Layer description with the new head position for each eye
// Render to each eye
for (int eye = 0; eye < 2; eye++)
{
ld.RenderPose[eye] = pose[eye]; // Update the Layer description with the new head position for each eye
// Set and clear current render target, and set viewport
int index = 0; ovr_GetTextureSwapChainCurrentIndex(session, ld.ColorTexture[eye], &index);
Context->OMSetRenderTargets(1, &eyeRenderTexRtv[eye][index], zbuffer[eye]); // zbuffer[eye]
const float clearColour[] = { 0.1f, 0.0f, 0.0f, 0.0f };
Context->ClearRenderTargetView(eyeRenderTexRtv[eye][index], clearColour);
Context->ClearDepthStencilView(zbuffer[eye], D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0, 0);
D3D11_VIEWPORT D3Dvp; // = { 0, 0, (float)ld.Viewport[eye].Size.w, (float)ld.Viewport[eye].Size.h };
D3Dvp.TopLeftX = 0.0f;
D3Dvp.TopLeftY = 0.0f;
D3Dvp.Width = (float)ld.Viewport[eye].Size.w;
D3Dvp.Height = (float)ld.Viewport[eye].Size.h;
D3Dvp.MinDepth = 0;
D3Dvp.MaxDepth = 1;
Context->RSSetViewports(1, &D3Dvp);
pose[eye].Position.z = 2.0f + camZ; // Move camera 2m from cube
pose[eye].Position.x = camX;
pose[eye].Position.y = camY;
// Calculate view and projection matrices using pose and SDK
XMVECTOR rot = XMLoadFloat4((XMFLOAT4 *)&pose[eye].Orientation);
XMVECTOR pos = XMLoadFloat3((XMFLOAT3 *)&pose[eye].Position);
XMVECTOR up = XMVector3Rotate(XMVectorSet(0, 1, 0, 0), rot);
XMVECTOR forward = XMVector3Rotate(XMVectorSet(0, 0, 1, 0), rot);
XMMATRIX view = XMMatrixLookAtLH(pos, XMVectorAdd(pos, forward), up);
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], 0, 1, ovrProjection_None);
XMMATRIX proj = XMMatrixTranspose(XMLoadFloat4x4((XMFLOAT4X4 *)&p));
// Render model and commit frame
XMMATRIX viewProj = XMMatrixMultiply(view, proj);
RenderSampleModel(&viewProj, Device, Context);
ovr_CommitTextureSwapChain(session, ld.ColorTexture[eye]);
}
// Send rendered eye buffers to HMD, and increment the frame if we're visible
ovrLayerHeader* layers[1] = { &ld.Header };
if (ovrSuccess == ovr_SubmitFrame(session, 0, nullptr, layers, 1))
frameIndex; // was frameIndex++; but changed to loop forever
}
ovr_Shutdown();
return 0;
}
//---------------------------------------------------------------------------------------
// THIS CODE IS NOT SPECIFIC TO VR OR THE SDK, JUST USED TO DRAW SOMETHING IN VR
//---------------------------------------------------------------------------------------
void CreateSampleModel(ID3D11Device * Device, ID3D11DeviceContext * Context)
{
// Create Vertex Buffer
//#define V(n) (n&1?+1.0f:-1.0f), (n&2?-1.0f:+1.0f), (n&4?+1.0f:-1.0f)
//float vertices[] = { V(0), V(3), V(2), V(6), V(3), V(7), V(4), V(2), V(6), V(1), V(5), V(3), V(4), V(1), V(0), V(5), V(4), V(7) };
D3D11_BUFFER_DESC vbDesc = { sizeof(VERTEX) * NUM_OF_VERTICES, D3D11_USAGE_DEFAULT, D3D11_BIND_VERTEX_BUFFER };
D3D11_SUBRESOURCE_DATA initData = { tList };
ID3D11Buffer* VertexBuffer;
Device->CreateBuffer(&vbDesc, &initData, &VertexBuffer);
//create the index buffer
D3D11_BUFFER_DESC bd;
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(short) * NUM_OF_INDICES; // 3 per triangle, 12 triangles
bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
Device->CreateBuffer(&bd, NULL, &IndexBuffer);
D3D11_MAPPED_SUBRESOURCE ms;
Context->Map(IndexBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, indexes, NUM_OF_INDICES * sizeof(short)); // copy the data
Context->Unmap(IndexBuffer, NULL);
// Create Vertex Shader
const char* vShader = "float4x4 m;"
"struct VOut { "
" float4 position : SV_POSITION;"
" float4 color : COLOR;"
"}; "
"VOut VS(float4 p1 : POSITION, float4 colour: COLOR)"
"{"
" VOut output;"
" output.position = mul(m, p1);"
" output.color = colour;"
" return output;"
"}";
ID3D10Blob * pBlob; D3DCompile(vShader, strlen(vShader), "VS", 0, 0, "VS", "vs_4_0", 0, 0, &pBlob, 0);
ID3D11VertexShader * VertexShader;
Device->CreateVertexShader(pBlob->GetBufferPointer(), pBlob->GetBufferSize(), 0, &VertexShader);
// Create Input Layout
D3D11_INPUT_ELEMENT_DESC elements[] = {
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_B8G8R8A8_UNORM, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
ID3D11InputLayout * InputLayout;
Device->CreateInputLayout(elements, 2, pBlob->GetBufferPointer(), pBlob->GetBufferSize(), &InputLayout);
// Create Pixel Shader
const char* pShader = "float4 PS(float4 position : POSITION, float4 colour : COLOR) : SV_Target { return colour; }";
D3DCompile(pShader, strlen(pShader), "PS", 0, 0, "PS", "ps_4_0", 0, 0, &pBlob, 0);
ID3D11PixelShader * PixelShader;
Device->CreatePixelShader(pBlob->GetBufferPointer(), pBlob->GetBufferSize(), 0, &PixelShader);
Context->IASetInputLayout(InputLayout);
Context->IASetIndexBuffer(IndexBuffer, DXGI_FORMAT_R16_UINT, 0);
UINT stride = sizeof(float) * 4U; // 7U
UINT offset = 0;
Context->IASetVertexBuffers(0, 1, &VertexBuffer, &stride, &offset);
Context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
Context->VSSetShader(VertexShader, 0, 0);
Context->PSSetShader(PixelShader, 0, 0);
}
//------------------------------------------------------
void RenderSampleModel(XMMATRIX * viewProj, ID3D11Device * Device, ID3D11DeviceContext * Context)
{
D3D11_BUFFER_DESC desc = { sizeof(XMMATRIX), D3D11_USAGE_DYNAMIC, D3D11_BIND_CONSTANT_BUFFER, D3D11_CPU_ACCESS_WRITE };
D3D11_SUBRESOURCE_DATA initData = { viewProj };
ID3D11Buffer * ConstantBuffer; Device->CreateBuffer(&desc, &initData, &ConstantBuffer);
Context->VSSetConstantBuffers(0, 1, &ConstantBuffer);
Context->DrawIndexed(NUM_OF_INDICES, 0, 0);
}
models.h
#ifndef MODELS_H
#define MODELS_H
#include "DirectXMath.h"
using namespace DirectX;
#define NUM_OF_MODELS 1
struct VERTEX{
float x;
float y;
float z;
uint32_t C; // Colour
};
#define NUM_OF_VERTICES 24
#define NUM_OF_INDICES 36
extern VERTEX tList[];
extern short indexes[];
#endif
models.cpp
#include "models.h"
VERTEX tList[] = {
{ -0.1f, 0.1f, 0.2f, 0xFF0000FF }, // Cube Vertex Index 0
{ 0.1f, 0.1f, 0.2f, 0xFF0000FF }, // 1
{ -0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 2
{ 0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 3
{ -0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 4
{ 0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 5
{ -0.1f, -0.1f, -0.0f, 0x00FF00FF }, // 6
{ 0.1f, -0.1f, -0.0f, 0x00FF00FF }, // 7
{ 0.1f, 0.1f, 0.2f, 0x0000FFFF }, // 8
{ 0.1f, 0.1f, -0.0f, 0x0000FFFF }, // 9
{ 0.08f, -0.1f, 0.2f, 0x0000FFFF }, // 10 Distorted in prep to test Depth
{ 0.08f, -0.1f, -0.0f, 0x0000FFFF }, // 11 Distorted in prep to test Depth
{ -0.1f, 0.1f, 0.2f, 0xFF0000FF }, // 12
{ -0.1f, 0.1f, -0.0f, 0xFF0000FF }, // 13
{ -0.1f, -0.1f, 0.2f, 0xFF0000FF }, // 14
{ -0.1f, -0.1f, -0.0f, 0xFF0000FF }, // 15
{ -0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 16
{ 0.1f, 0.1f, -0.0f, 0x00FF00FF }, // 17
{ -0.1f, 0.1f, 0.2f, 0x00FF00FF }, // 18
{ 0.1f, 0.1f, 0.2f, 0x00FF00FF }, // 19
{ -0.1f, -0.1f, -0.0f, 0x00FFFFFF }, // 20
{ 0.1f, -0.1f, -0.0f, 0x00FFFFFF }, // 21
{ -0.1f, -0.1f, 0.2f, 0x00FFFFFF }, // 22
{ 0.1f, -0.1f, 0.2f, 0x00FFFFFF }, // 23
};
short indexes[] = {
0, 1, 2, // FRONT QUAD
2, 1, 3,
5, 4, 6, // BACK QUAD
5, 6, 7,
8, 9, 11, // RIGHT QUAD
8, 11, 10,
13, 12,14, // LEFT QUAD
13, 14, 15,
18,16, 17, // TOP QUAD
18, 17, 19,
22, 23, 21, // BOTTOM QUAD
22, 21, 20
};
After much research I've found my own solution:
The line
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], 0, 1, ovrProjection_None);
creates the projection matrix.
It was simply a case of setting the near and far projection depths correctly (with zNear at 0 and zFar at 1, presumably nothing useful survives the depth test):
float zNear = 0.1f;
float zFar = 1000.0f;
ovrMatrix4f p = ovrMatrix4f_Projection(ld.Fov[eye], zNear, zFar, ovrProjection_None);
Amazing how seven weeks of investigation revealed the flaw as a simple omission!
I am new to DirectX, and I now have a somewhat silly question.
I am using Windows 8.1, DirectX 11.2, and a right-handed coordinate system.
I tried to apply a texture to an equilateral triangle in the x-y plane, centred at (0,0,0).
But the output shape is distorted (it doesn't look like an equilateral triangle at all!). In theory, looking along the x-axis I should see nothing, because the triangle lies in the x-y plane; but it turns out that I can see the triangle. In addition, if I change the value of the normal vectors, the output shape changes too! I do not understand why. Please help!
Here is the view matrix configuration:
static const XMVECTORF32 eye = { 0.0f, 0.0f, 1.5f, 0.0f };
static const XMVECTORF32 at = { 0.0f, 0.0f, 0.0f, 0.0f };
static const XMVECTORF32 up = { 0.0f, 1.0f, 0.0f, 0.0f };
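For context, here is a sketch of the view/projection setup these constants imply; it is an assumption, not code from the post. It uses the right-handed DirectXMath variants (to match the stated right-handed coordinate system) and the common app-template convention of storing matrices transposed for HLSL's mul(vector, matrix); aspectRatio and the clip planes are also assumed:
// Assumed setup, not from the original post:
XMStoreFloat4x4(&m_constantBufferData.view,
XMMatrixTranspose(XMMatrixLookAtRH(eye, at, up)));
XMStoreFloat4x4(&m_constantBufferData.projection,
XMMatrixTranspose(XMMatrixPerspectiveFovRH(
XM_PIDIV4,    // vertical field of view (assumed)
aspectRatio,  // backbuffer width / height (assumed)
0.01f,        // near plane (assumed)
100.0f)));    // far plane (assumed)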
Here is the vertexshader:
cbuffer ModelViewProjectionConstantBuffer : register(b0)
{
matrix model;
matrix view;
matrix projection;
};
struct VertexShaderInput
{
float3 pos : POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
struct PixelShaderInput
{
float4 pos : SV_POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
PixelShaderInput main(VertexShaderInput input)
{
PixelShaderInput vertexShaderOutput;
float4 pos = float4(input.pos, 1.0f);
pos = mul(pos, model);
pos = mul(pos, view);
pos = mul(pos, projection);
vertexShaderOutput.pos = pos;
vertexShaderOutput.tex = input.tex;
vertexShaderOutput.norm = mul(float4(normalize(input.norm), 0.0f), model).xyz;
return vertexShaderOutput;
}
Here is the pixelshader:
Texture2D Texture : register(t0);
SamplerState Sampler : register(s0);
struct PixelShaderInput
{
float4 pos : SV_POSITION;
float3 norm : NORMAL;
float2 tex : TEXCOORD0;
};
float4 main(PixelShaderInput input) : SV_TARGET
{
float3 lightDirection = normalize(float3(0, 0, -1));
return Texture.Sample(Sampler, input.tex); //* (0.8f * saturate(dot(normalize(input.norm), -lightDirection)) + 0.2f);
}
Here are the coordinates:
VertexPositionTexture vertexPositionTexture[] =
{
{ XMFLOAT3(-1.5, -0.5*sqrtf(3), 0.0f), XMFLOAT3(0.0f, 0.0f, 0.4f), XMFLOAT2(0.0, 1.0) },
{ XMFLOAT3(0.0f, sqrtf(3), 0.0f), XMFLOAT3(0.0f, 0.0f, 0.4f), XMFLOAT2(0.5, 0.0) },
{ XMFLOAT3(1.5, -0.5*sqrtf(3), 0.0f), XMFLOAT3(0.0f, 0.0f, 0.4f), XMFLOAT2(1.0, 1.0) },
};
The index array would simply be {0,1,2} in clockwise order.
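For what it's worth, those positions do describe an equilateral triangle on paper: the base runs from (-1.5, -0.5·√3) to (1.5, -0.5·√3), length 3, and each slanted edge to the apex (0, √3) has length √(1.5² + (1.5·√3)²) = √(2.25 + 6.75) = 3. So any distortion on screen is introduced somewhere in the pipeline, not by the vertex data.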
So if I change the normal vector value in vertexPositionTexture from XMFLOAT3(0.0, 0.0, 0.4) to XMFLOAT3(0.0, 0.0, -1), the shape definitely changes. I don't know why.
Here is how I create the DeviceDependentResources:
void TextureSceneRenderer::CreateDeviceDependentResources()
{
// Load shaders asynchronously.
auto loadVSTask = DX::ReadDataAsync(L"TextureVertexShader.cso");
auto loadPSTask = DX::ReadDataAsync(L"TexturePixelShader.cso");
BasicLoader^ loader = ref new BasicLoader(m_deviceResources->GetD3DDevice());
loader->LoadTexture(
L"cat.dds",
&m_texture,
&m_textureSRV
);
// create the sampler
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.Filter = D3D11_FILTER_ANISOTROPIC;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
samplerDesc.MipLODBias = 0.0f;
// maximum anisotropy level (the template comment suggests 4x on feature level 9.2 and above, otherwise 2x)
samplerDesc.MaxAnisotropy = 8;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
samplerDesc.BorderColor[0] = 0.0f;
samplerDesc.BorderColor[1] = 0.0f;
samplerDesc.BorderColor[2] = 0.0f;
samplerDesc.BorderColor[3] = 0.0f;
// allow use of all mip levels
samplerDesc.MinLOD = 0;
samplerDesc.MaxLOD = D3D11_FLOAT32_MAX;
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateSamplerState(
&samplerDesc,
&m_sampler)
);
// After the vertex shader file is loaded, create the shader and input layout.
auto createVSTask = loadVSTask.then([this](const std::vector<byte>& fileData) {
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateVertexShader(
&fileData[0],
fileData.size(),
nullptr,
&m_vertexShader
)
);
static const D3D11_INPUT_ELEMENT_DESC vertexDesc[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateInputLayout(
vertexDesc,
ARRAYSIZE(vertexDesc),
&fileData[0],
fileData.size(),
&m_inputLayout
)
);
});
// After the pixel shader file is loaded, create the shader and constant buffer.
auto createPSTask = loadPSTask.then([this](const std::vector<byte>& fileData) {
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreatePixelShader(
&fileData[0],
fileData.size(),
nullptr,
&m_pixelShader
)
);
CD3D11_BUFFER_DESC constantBufferDesc(sizeof(ModelViewProjectionConstantBuffer), D3D11_BIND_CONSTANT_BUFFER);
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&constantBufferDesc,
nullptr,
&m_constantBuffer
)
);
});
// Once both shaders are loaded, create the mesh.
auto createCubeTask = (createPSTask && createVSTask).then([this]() {
D3D11_SUBRESOURCE_DATA vertexBufferData = { 0 };
vertexBufferData.pSysMem = this->model->getVertexPositionTexture();
vertexBufferData.SysMemPitch = 0;
vertexBufferData.SysMemSlicePitch = 0;
CD3D11_BUFFER_DESC vertexBufferDesc(sizeof(this->model->getVertexPositionTexture()[0])*this->model->n_texture_vertex, D3D11_BIND_VERTEX_BUFFER);
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&vertexBufferDesc,
&vertexBufferData,
&m_vertexBuffer
)
);
m_indexCount = this->model->n_mesh; // ARRAYSIZE(cubeIndices);
D3D11_SUBRESOURCE_DATA indexBufferData = { 0 };
indexBufferData.pSysMem = this->model->getMeshTextureIndex();
indexBufferData.SysMemPitch = 0;
indexBufferData.SysMemSlicePitch = 0;
CD3D11_BUFFER_DESC indexBufferDesc(sizeof(this->model->getMeshTextureIndex()[0])*m_indexCount, D3D11_BIND_INDEX_BUFFER);
DX::ThrowIfFailed(
m_deviceResources->GetD3DDevice()->CreateBuffer(
&indexBufferDesc,
&indexBufferData,
&m_indexBuffer
)
);
});
// Once the cube is loaded, the object is ready to be rendered.
createCubeTask.then([this]() {
m_loadingComplete = true;
});
}
Here is the render function:
void TextureSceneRenderer::Render()
{
// Loading is asynchronous. Only draw geometry after it's loaded.
if (!m_loadingComplete)
{
return;
}
auto context = m_deviceResources->GetD3DDeviceContext();
// Set render targets to the screen.
ID3D11RenderTargetView *const targets[1] = { m_deviceResources->GetBackBufferRenderTargetView() };
context->OMSetRenderTargets(1, targets, m_deviceResources->GetDepthStencilView());
// Prepare the constant buffer to send it to the graphics device.
context->UpdateSubresource(
m_constantBuffer.Get(),
0,
NULL,
&m_constantBufferData,
0,
0
);
// Each vertex is one instance of the VertexPositionColor struct.
UINT stride = sizeof(VertexPositionColor);
UINT offset = 0;
context->IASetVertexBuffers(
0,
1,
m_vertexBuffer.GetAddressOf(),
&stride,
&offset
);
context->IASetIndexBuffer(
m_indexBuffer.Get(),
DXGI_FORMAT_R16_UINT, // Each index is one 16-bit unsigned integer (short).
0
);
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
context->IASetInputLayout(m_inputLayout.Get());
// Attach our vertex shader.
context->VSSetShader(
m_vertexShader.Get(),
nullptr,
0
);
// Send the constant buffer to the graphics device.
context->VSSetConstantBuffers(
0,
1,
m_constantBuffer.GetAddressOf()
);
// Attach our pixel shader.
context->PSSetShader(
m_pixelShader.Get(),
nullptr,
0
);
context->PSSetShaderResources(
0,
1,
m_textureSRV.GetAddressOf()
);
context->PSSetSamplers(
0, // starting at the first sampler slot
1, // set one sampler binding
m_sampler.GetAddressOf()
);
// Draw the objects.
context->DrawIndexed(
m_indexCount,
0,
0
);
}