The docs suggest that, with Direct3D 11.3, DEFAULT-usage textures can be mapped on UMA architectures such as Intel integrated graphics.
I tried to achieve this, but Map() always fails with E_INVALIDARG.
I am quite new to C++ and DirectX, but below is what I believe to be a minimal test case. Please don't hesitate to point out any stupidity I am committing.
I am running this on a notebook with Windows 10 1809, Intel Skylake i5-6300U with HD Graphics 520.
#include "pch.h"
#include <iostream>
#include <dxgi1_6.h>
#include <d3d11_4.h>
#include <assert.h>
int main()
{
HRESULT res = S_OK;
ID3D11Device *Dev = nullptr;
ID3D11DeviceContext *Ctx = nullptr;
D3D_FEATURE_LEVEL Fl;
D3D_FEATURE_LEVEL fls[1] = { D3D_FEATURE_LEVEL_11_1 };
res = D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_DEBUG | D3D11_CREATE_DEVICE_BGRA_SUPPORT, fls, 1, D3D11_SDK_VERSION, &Dev, &Fl, &Ctx);
assert(res == S_OK);
assert(Fl == D3D_FEATURE_LEVEL_11_1);
ID3D11Device5 *Dev5 = nullptr;
res = Dev->QueryInterface<ID3D11Device5>(&Dev5);
assert(res == S_OK);
Dev->Release();
Dev = nullptr;
ID3D11DeviceContext4 *Ctx4 = nullptr;
res = Ctx->QueryInterface<ID3D11DeviceContext4>(&Ctx4);
assert(res == S_OK);
Ctx->Release();
Ctx = nullptr;
D3D11_FEATURE_DATA_D3D11_OPTIONS2 opts2;
res = Dev5->CheckFeatureSupport(D3D11_FEATURE_D3D11_OPTIONS2, &opts2, sizeof(opts2));
assert(res == S_OK);
assert(opts2.MapOnDefaultTextures);
assert(opts2.UnifiedMemoryArchitecture);
D3D11_TEXTURE2D_DESC1 texDesc = { 0 };
texDesc.ArraySize = 1;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_UNORDERED_ACCESS;
texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.Height = 256;
texDesc.Width = 256;
texDesc.MipLevels = 1;
texDesc.MiscFlags = 0;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.TextureLayout = D3D11_TEXTURE_LAYOUT_UNDEFINED;
texDesc.Usage = D3D11_USAGE_DEFAULT;
BYTE mem[256 * 256 * 4];
ZeroMemory(mem, sizeof(mem));
D3D11_SUBRESOURCE_DATA data = { 0 };
data.pSysMem = mem;
data.SysMemPitch = 256 * 4;
ID3D11Texture2D1 *tex2d;
res = Dev5->CreateTexture2D1(&texDesc, &data, &tex2d);
assert(res == S_OK);
D3D11_MAPPED_SUBRESOURCE map = { 0 };
// I believe at least one of these should succeed, but all fail
res = Ctx4->Map(tex2d, 0, D3D11_MAP_READ, 0, &map);
//res = Ctx4->Map(tex2d, 0, D3D11_MAP_WRITE, 0, &map);
//res = Ctx4->Map(tex2d, 0, D3D11_MAP_READ_WRITE, 0, &map);
assert(res == S_OK); // E_INVALIDARG
}
I believe the Map() call should succeed, but it fails with E_INVALIDARG.
EDIT: I tried D3D11_TEXTURE_LAYOUT_ROW_MAJOR and D3D11_TEXTURE_LAYOUT_64K_STANDARD_SWIZZLE too, but then CreateTexture2D1() fails with E_INVALIDARG. Maybe my hardware doesn't support those modes?
I think the issue is described in the documentation:
It is illegal to set CPU access flags on default textures without also setting TextureLayout to a value other than D3D11_TEXTURE_LAYOUT_UNDEFINED.
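So per that note, the texture description should presumably use an explicit layout, something like the sketch below (untested beyond my machine; as the EDIT above shows, CreateTexture2D1() still rejects both explicit layouts on my HD Graphics 520, so driver support seems to vary):
D3D11_TEXTURE2D_DESC1 texDesc = {};
texDesc.Width = 256;
texDesc.Height = 256;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.SampleDesc.Count = 1;
texDesc.Usage = D3D11_USAGE_DEFAULT;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE | D3D11_BIND_UNORDERED_ACCESS;
texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
texDesc.TextureLayout = D3D11_TEXTURE_LAYOUT_ROW_MAJOR; // anything but D3D11_TEXTURE_LAYOUT_UNDEFINED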
Related
I'm new to DirectX and have been reading tons of tutorials and samples, however I'm unable to find any documentation on how to directly display an image that is loaded into a Texture2D on the screen. Almost all tutorials I've seen deal with 3D graphics, shaders, etc. However, I just want to display the contents of the texture.
Here's what I have so far:
DeviceResources.cpp:
#include "DeviceResources.h"
#include "Renderer.h"
DeviceResources::DeviceResources()
{
}
HRESULT DeviceResources::CreateDeviceResources(HWND hwnd)
{
D3D_FEATURE_LEVEL levels[] = {
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_11_1,
D3D_FEATURE_LEVEL_12_0,
D3D_FEATURE_LEVEL_12_1
};
UINT flags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
DXGI_SWAP_CHAIN_DESC swap_chain_desc;
ZeroMemory(&swap_chain_desc, sizeof(DXGI_SWAP_CHAIN_DESC));
swap_chain_desc.Windowed = TRUE;
swap_chain_desc.BufferCount = 2;
swap_chain_desc.BufferDesc.Format = DXGI_FORMAT_R10G10B10A2_UNORM;
swap_chain_desc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swap_chain_desc.SampleDesc.Count = 1;
swap_chain_desc.SampleDesc.Quality = 0;
swap_chain_desc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
swap_chain_desc.OutputWindow = hwnd;
Microsoft::WRL::ComPtr<ID3D11Device> device;
Microsoft::WRL::ComPtr<ID3D11DeviceContext> context;
Microsoft::WRL::ComPtr<IDXGISwapChain> swapChain;
D3D11CreateDeviceAndSwapChain(
nullptr,
D3D_DRIVER_TYPE_HARDWARE,
nullptr,
flags,
levels,
ARRAYSIZE(levels),
D3D11_SDK_VERSION,
&swap_chain_desc,
swapChain.GetAddressOf(),
device.GetAddressOf(),
&m_feature_level,
context.GetAddressOf()
);
device.As(&m_device);
context.As(&m_context);
swapChain.As(&m_swapChain);
cv::directx::ocl::initializeContextFromD3D11Device(m_device.Get());
auto hdr = Renderer::HDRMetadata();
m_swapChain->SetHDRMetaData(DXGI_HDR_METADATA_TYPE_HDR10, sizeof(DXGI_HDR_METADATA_HDR10), &hdr);
m_swapChain->SetColorSpace1(DXGI_COLOR_SPACE_RGB_FULL_G2084_NONE_P2020);
m_swapChain->GetBuffer(0, IID_PPV_ARGS(&m_backBuffer));
m_backBuffer->GetDesc(&m_bbDesc);
ZeroMemory(&m_viewport, sizeof(D3D11_VIEWPORT));
m_viewport.Height = static_cast<float>(m_bbDesc.Height);
m_viewport.Width = static_cast<float>(m_bbDesc.Width);
m_viewport.MinDepth = 0;
m_viewport.MaxDepth = 1;
m_context->RSSetViewports(1, &m_viewport);
m_device->CreateRenderTargetView(m_backBuffer.Get(), nullptr, m_renderTargetView.GetAddressOf());
return S_OK;
}
HRESULT DeviceResources::ConfigureBackBuffer()
{
m_swapChain->GetBuffer(0, IID_PPV_ARGS(&m_backBuffer));
m_device->CreateRenderTargetView(m_backBuffer.Get(), nullptr, m_renderTargetView.GetAddressOf());
m_backBuffer->GetDesc(&m_bbDesc);
ZeroMemory(&m_viewport, sizeof(D3D11_VIEWPORT));
m_viewport.Height = static_cast<float>(m_bbDesc.Height);
m_viewport.Width = static_cast<float>(m_bbDesc.Width);
m_viewport.MinDepth = 0;
m_viewport.MaxDepth = 1;
m_context->RSSetViewports(1, &m_viewport);
return S_OK;
}
HRESULT DeviceResources::ReleaseBackBuffer()
{
m_renderTargetView.Reset();
m_backBuffer.Reset();
m_context->Flush();
return S_OK;
}
HRESULT DeviceResources::SetFullscreen(bool fullscreen)
{
m_swapChain->SetFullscreenState(fullscreen, nullptr);
ReleaseBackBuffer();
m_swapChain->ResizeBuffers(0, 0, 0, DXGI_FORMAT_UNKNOWN, 0);
ConfigureBackBuffer();
return S_OK;
}
void DeviceResources::Present()
{
m_swapChain->Present(1, 0);
}
DeviceResources::~DeviceResources()
= default;
Renderer.cpp:
#include "Renderer.h"
#include <utility>
#include <comdef.h>
#include <vector>
Renderer::Renderer(std::shared_ptr<DeviceResources> resources) : m_resources(std::move(resources)), m_frameCount(0)
{
}
HRESULT Renderer::CreateDeviceDependentResources()
{
return S_OK;
}
HRESULT Renderer::CreateWindowSizeDependentResources()
{
return S_OK;
}
void Renderer::Update()
{
//
}
void Renderer::Render()
{
cv::Mat mat = cv::imread("C:/Users/Richard/Downloads/orig_cave_L.ppm", cv::IMREAD_ANYCOLOR | cv::IMREAD_ANYDEPTH);
cv::Mat as4channelMat(mat.size(), CV_MAKE_TYPE(mat.depth(), 4));
int conversion[] = { 0, 0, 1, 1, 2, 2, -1, 3 };
cv::mixChannels(&mat, 1, &as4channelMat, 1, conversion, 4);
D3D11_TEXTURE2D_DESC desc;
desc.Width = 3840;
desc.Height = 2160;
desc.MipLevels = desc.ArraySize = 1;
desc.Format = DXGI_FORMAT_R16G16B16A16_UNORM;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DEFAULT;
desc.BindFlags = 0;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE | D3D11_CPU_ACCESS_READ;
desc.MiscFlags = 0;
ID3D11Texture2D* tex = nullptr;
auto hr = m_resources->GetDevice()->CreateTexture2D(&desc, nullptr, &tex);
if (FAILED(hr))
{
_com_error err(hr);
auto errMsg = err.ErrorMessage();
}
try {
cv::directx::convertToD3D11Texture2D(as4channelMat, tex);
} catch (cv::Exception& e)
{
std::cerr << "ERROR: " << e.msg << std::endl;
throw;
}
auto hr3 = m_resources->m_device->CreateShaderResourceView(tex, nullptr, m_texture.GetAddressOf());
if (FAILED(hr3))
{
_com_error err(hr3);
auto errMsg = err.ErrorMessage();
}
std::unique_ptr<DirectX::SpriteBatch> m_spriteBatch;
DirectX::SimpleMath::Vector2 m_screenPos, m_origin;
m_spriteBatch = std::make_unique<DirectX::SpriteBatch>(m_resources->m_context.Get());
CD3D11_TEXTURE2D_DESC catDesc;
tex->GetDesc(&catDesc);
m_origin.x = float(catDesc.Width / 2);
m_origin.y = float(catDesc.Height / 2);
m_screenPos.x = m_resources->m_bbDesc.Width / 2.f;
m_screenPos.y = m_resources->m_bbDesc.Height / 2.f;
m_spriteBatch->Begin();
m_spriteBatch->Draw(
m_texture.Get(),
m_screenPos,
nullptr,
DirectX::Colors::White,
0.0f,
m_origin
);
m_spriteBatch->End();
}
DXGI_HDR_METADATA_HDR10 Renderer::HDRMetadata()
{
//
}
Renderer::~Renderer()
{
}
From my understanding, I have to somehow create a "quad", apply the texture to it, and then display the quad itself. However, I am unsure how to do any of this and can't find any resources to help.
Edit: Given the recommendations, I have tried using DirectXTK, specifically SpriteBatch. I followed the relevant instructions in the documentation; however, Draw doesn't seem to do or display anything (see Renderer.cpp above).
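Rereading the DirectXTK tutorials, I notice my Render() never binds or clears the render target and never presents, and SpriteBatch is meant to be created once rather than every frame; also, my texture desc sets BindFlags = 0, while CreateShaderResourceView presumably needs D3D11_BIND_SHADER_RESOURCE, so hr3 may already be failing. The per-frame sequence from the tutorials looks roughly like this (a sketch against my own classes, untested):
auto ctx = m_resources->m_context.Get();
auto rtv = m_resources->m_renderTargetView.Get();
const float clearColor[4] = { 0.f, 0.f, 0.f, 1.f };
ctx->ClearRenderTargetView(rtv, clearColor);
ctx->OMSetRenderTargets(1, &rtv, nullptr);
m_spriteBatch->Begin();
m_spriteBatch->Draw(m_texture.Get(), m_screenPos, nullptr, DirectX::Colors::White, 0.0f, m_origin);
m_spriteBatch->End();
m_resources->Present(); // without a Present() the frame never reaches the window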
My build environment is as follows:
Windows 8.1, VS2012, a desktop application built with the Windows 8.0 SDK and C++.
When I run my program on Windows 8.1, RowPitch prints 2560, but under Windows 10 the same program prints 5120.
What am I doing wrong here?
Here is the code. Thanks for all the replies.
#include <d3d11.h>
#include <tchar.h>
#include <stdio.h>
static bool init_directx11(ID3D11Device **pDevice, ID3D11DeviceContext **pDeviceContext)
{
D3D_FEATURE_LEVEL featureLevels[] = {D3D_FEATURE_LEVEL_11_0, D3D_FEATURE_LEVEL_10_1, D3D_FEATURE_LEVEL_10_0, D3D_FEATURE_LEVEL_9_1};
D3D_FEATURE_LEVEL featureLevel;
UINT createDeviceFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
HRESULT hr = D3D11CreateDevice(NULL, D3D_DRIVER_TYPE_HARDWARE, NULL, createDeviceFlags, featureLevels, ARRAYSIZE(featureLevels), D3D11_SDK_VERSION, pDevice,
&featureLevel, pDeviceContext);
return SUCCEEDED(hr);
}
int _tmain(int argc, _TCHAR* argv[])
{
ID3D11Device *pDevice = nullptr;
ID3D11DeviceContext *pDeviceContext= nullptr;
if (!init_directx11(&pDevice, &pDeviceContext))
{
return FALSE;
}
D3D11_TEXTURE2D_DESC desc;
ZeroMemory(&desc, sizeof(D3D11_TEXTURE2D_DESC));
desc.ArraySize = 1;
desc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
desc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
desc.Format = DXGI_FORMAT_YUY2;
desc.MipLevels = 1;
desc.MiscFlags = 0;
desc.SampleDesc.Count = 1;
desc.SampleDesc.Quality = 0;
desc.Usage = D3D11_USAGE_DYNAMIC;
desc.Width = 1280;
desc.Height = 720;
ID3D11Texture2D* pTexture2D = nullptr;
HRESULT hr = pDevice->CreateTexture2D(&desc, NULL, &pTexture2D);
D3D11_MAPPED_SUBRESOURCE mappedResource;
ZeroMemory(&mappedResource, sizeof(D3D11_MAPPED_SUBRESOURCE));
hr = pDeviceContext->Map(pTexture2D, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
printf("RowPitch = %u\n", mappedResource.RowPitch);
pDeviceContext->Unmap(pTexture2D, 0);
pTexture2D->Release();
pDeviceContext->Release();
pDevice->Release();
getchar();
}
What am I doing wrong here?
This is not necessarily wrong. RowPitch depends on the layout the hardware and driver assign to the texture, so the pitch can vary between systems. You are supposed to read the pitch back once the resource is mapped and use it when reading or writing the data. Here, a 1280-pixel-wide YUY2 row needs 1280 * 2 = 2560 bytes; the Windows 10 driver has simply chosen to pad each row out to 5120 bytes.
See this thread and message for a code snippet showing how to use the pitch:
The texture resource will have its own pitch (the number of bytes in a row), which is probably different from the pitch of your source data. This pitch is given to you as the "RowPitch" member of D3D11_MAPPED_SUBRESOURCE. So typically you do something like this:
// buffer points at the tightly packed source data, rowspan is the number of
// bytes in one source row, and height is the image height in rows.
BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData);
for (UINT i = 0; i < height; ++i)
{
memcpy(mappedData, buffer, rowspan);
mappedData += mappedResource.RowPitch;
buffer += rowspan;
}
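Applied to the question's own 1280x720 YUY2 texture, that pattern looks roughly like the sketch below (YUY2 packs 2 pixels into 4 bytes, so a tightly packed source row is 1280 * 2 = 2560 bytes; frameData is a hypothetical stand-in for wherever your frame actually comes from):
const UINT width = 1280;
const UINT height = 720;
const UINT rowspan = width * 2; // YUY2: 4 bytes per 2 pixels
const BYTE* src = frameData; // hypothetical tightly packed source buffer
D3D11_MAPPED_SUBRESOURCE mapped;
if (SUCCEEDED(pDeviceContext->Map(pTexture2D, 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped)))
{
BYTE* dst = reinterpret_cast<BYTE*>(mapped.pData);
for (UINT y = 0; y < height; ++y)
{
memcpy(dst, src, rowspan); // copy only the meaningful bytes of each row
dst += mapped.RowPitch; // 2560 on Windows 8.1, 5120 on Windows 10 here
src += rowspan;
}
pDeviceContext->Unmap(pTexture2D, 0);
}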
Every time I try to create the swap chain, it throws this error.
After hours of searching for a fix, I found nothing that worked for me.
Here's the important part of the code:
bool Direct3D::Initialize(HWND hWnd)
{
HRESULT hResult;
ID3D11Device* pDevice = NULL;
ID3D11DeviceContext* pDeviceContext = NULL;
IDXGIDevice* pDXGIDevice = NULL;
IDXGIAdapter* pAdapter = NULL;
IDXGIFactory* pFactory = NULL;
IDXGISwapChain* pSwapChain = NULL;
D3D_FEATURE_LEVEL featureLevels[] = { //Add feature levels to support here
D3D_FEATURE_LEVEL_11_0
};
#ifdef _DEBUG
UINT deviceFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT | D3D11_CREATE_DEVICE_DEBUG;
#else
UINT deviceFlags = D3D11_CREATE_DEVICE_BGRA_SUPPORT;
#endif
//Create the device and deviceContext
hResult = D3D11CreateDevice(NULL, //needs to be NULL if D3D_DRIVER_TYPE_HARDWARE is used; NULL takes the default adapter
D3D_DRIVER_TYPE_HARDWARE,
NULL, //needs to be not NULL if D3D_DRIVER_TYPE_SOFTWARE is used
deviceFlags,
featureLevels,
ARRAYSIZE(featureLevels),
D3D11_SDK_VERSION,
&pDevice,
NULL,
&pDeviceContext);
if (FAILED(hResult))
{
return false;
}
hResult = pDevice->QueryInterface(__uuidof(IDXGIDevice), (void**)&pDXGIDevice);
if (FAILED(hResult))
{
return false;
}
hResult = pDXGIDevice->GetAdapter(&pAdapter);
if (FAILED(hResult))
{
return false;
}
hResult = pAdapter->GetParent(__uuidof(IDXGIFactory), (void**)&pFactory);
if (FAILED(hResult))
{
return false;
}
DXGI_MODE_DESC bufferDesc;
ZeroMemory(&bufferDesc, sizeof(DXGI_MODE_DESC));
bufferDesc.Width = 0; //Zero for evaluating it from the output window
bufferDesc.Height = 0; //Zero for evaluating it from the output window
bufferDesc.RefreshRate.Numerator = config.refreshRate;
bufferDesc.RefreshRate.Denominator = 1;
bufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
bufferDesc.ScanlineOrdering = DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED;
bufferDesc.Scaling = DXGI_MODE_SCALING_UNSPECIFIED;
DXGI_SWAP_CHAIN_DESC swapChainDesc;
ZeroMemory(&swapChainDesc, sizeof(DXGI_SWAP_CHAIN_DESC));
swapChainDesc.BufferDesc = bufferDesc;
swapChainDesc.SampleDesc.Count = 1;
swapChainDesc.SampleDesc.Quality = 0;
swapChainDesc.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
swapChainDesc.BufferCount = 1;
swapChainDesc.OutputWindow = hWnd;
swapChainDesc.Windowed = config.fullscreen;
swapChainDesc.SwapEffect = DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL;
swapChainDesc.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH | DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY;
hResult = pFactory->CreateSwapChain(pDevice, &swapChainDesc, &pSwapChain);
CGE_SAFE_RELEASE(pDXGIDevice);
CGE_SAFE_RELEASE(pAdapter);
CGE_SAFE_RELEASE(pFactory);
if (FAILED(hResult))
{
return false;
}
return true;
}
Looking at the documentation for CreateSwapChain(), it seems that the swap chain pointer must not be NULL, but that doesn't make sense to me, because I want CreateSwapChain() to fill in pSwapChain for me.
Does anyone know a solution for this problem?
Your IDXGISwapChain should be associated with a SwapChainPanel XAML control (or an HWND if you are running a Win32 application). You can do the initialization like this:
hr = dxgiFactory2->CreateSwapChainForHwnd( g_pd3dDevice, g_hWnd, &sd, nullptr, nullptr, &g_pSwapChain1 );
if (SUCCEEDED(hr))
{
hr = g_pSwapChain1->QueryInterface( __uuidof(IDXGISwapChain), reinterpret_cast<void**>(&g_pSwapChain) );
}
This code is from the Microsoft Win32 DirectX sample; note that CreateSwapChainForHwnd is a method of IDXGIFactory2 and takes a DXGI_SWAP_CHAIN_DESC1 (sd above), both introduced with DirectX 11.1.
https://code.msdn.microsoft.com/windowsdesktop/Direct3D-Tutorial-Win32-829979ef/view/Discussions#content
If you are running a WinRT application, you can look through the DirectX and XAML application template.
You are passing in the address of your swap chain pointer precisely so that the creation function can fill that pointer in for you; that part is expected. Here is an example using D3D11CreateDeviceAndSwapChain:
//loop through our driver types till we find the one we will be using
for (unsigned int i = 0; i < DriverCount; i++)
{
//Create our device and swap chain
DXERROR = D3D11CreateDeviceAndSwapChain(nullptr, drivers[i], nullptr,
Flag, levels, LevelsCount, D3D11_SDK_VERSION, &SwapDesc, &DX.pSwapChain,
&DX.pDevice, &DX.FeatureLevel, &DX.pImmediateContext);
if (SUCCEEDED(DXERROR))
{
DX.DriverType = drivers[i];
break;
}
}
I use CheckMultisampleQualityLevels(...) to establish the MSAA support on my hardware, in this order:
D3D11CreateDevice(...) gives me device
device->CheckMultisampleQualityLevels(...)
Pass results to DXGI_SWAP_CHAIN_DESC.SampleDesc
CreateSwapChain(...) with given DXGI_SWAP_CHAIN_DESC
The problem is, CheckMultisampleQualityLevels(...) always gives me 0 for pNumQualityLevels. And I'm sure my graphics card supports some MSAA (I've tested the program on a GeForce GTX 780 and others, with the same result).
Did I miss something? Should I call something else before CheckMultisampleQualityLevels(...)?
The code:
Create device:
UINT createDeviceFlags = 0;
#ifdef DEBUG_DIRECTX
createDeviceFlags |= D3D11_CREATE_DEVICE_DEBUG;
#endif
D3D_DRIVER_TYPE driverTypes[] = {
D3D_DRIVER_TYPE_HARDWARE,
D3D_DRIVER_TYPE_WARP,
D3D_DRIVER_TYPE_REFERENCE,
};
std::string driverTypesNames[] = {
"D3D_DRIVER_TYPE_HARDWARE",
"D3D_DRIVER_TYPE_WARP",
"D3D_DRIVER_TYPE_REFERENCE",
};
UINT numDriverTypes = ARRAYSIZE(driverTypes);
D3D_FEATURE_LEVEL featureLevels[] = {
D3D_FEATURE_LEVEL_11_0,
D3D_FEATURE_LEVEL_10_1,
D3D_FEATURE_LEVEL_10_0,
};
std::string featureLevelsNames[] = {
"D3D_FEATURE_LEVEL_11_0",
"D3D_FEATURE_LEVEL_10_1",
"D3D_FEATURE_LEVEL_10_0",
};
UINT numFeatureLevels = ARRAYSIZE(featureLevels);
D3D_FEATURE_LEVEL g_featureLevel = D3D_FEATURE_LEVEL_11_0;
for(UINT driverTypeIndex = 0; driverTypeIndex < numDriverTypes; driverTypeIndex++){
driverType = driverTypes[driverTypeIndex];
result = D3D11CreateDevice(NULL, driverType, NULL, createDeviceFlags, featureLevels, numFeatureLevels, D3D11_SDK_VERSION, &device, &g_featureLevel, &context);
if(SUCCEEDED(result)){
LOG(logDEBUG1, "Driver type: " << driverTypesNames[driverTypeIndex] << ".", MOD_GRAPHIC);
break;
}
}
ERROR_HANDLE(SUCCEEDED(result), L"Could not create device (DirectX 11).", MOD_GRAPHIC);
Check multi-sample quality levels (based on vertexwahn.de article):
sampleCountOut = 1;
maxQualityLevelOut = 0;
for(UINT sampleCount = 1; sampleCount <= D3D11_MAX_MULTISAMPLE_SAMPLE_COUNT; sampleCount++){
UINT maxQualityLevel = 0;
HRESULT hr = device->CheckMultisampleQualityLevels(DXGI_FORMAT_R8G8B8A8_UNORM, sampleCount, &maxQualityLevel);
if(maxQualityLevel > 0){
maxQualityLevel--;
}
ERROR_HANDLE(hr == S_OK, L"CheckMultisampleQualityLevels failed.", MOD_GRAPHIC);
if(maxQualityLevel > 0){
LOG(logDEBUG1, "MSAA " << sampleCount << "X supported with " << maxQualityLevel << " quality levels.", MOD_GRAPHIC);
sampleCountOut = sampleCount;
maxQualityLevelOut = maxQualityLevel;
}
}
Swap chain:
DXGI_SWAP_CHAIN_DESC sd;
ZeroMemory(&sd, sizeof(sd));
sd.BufferCount = 1;
sd.BufferDesc.Width = width;
sd.BufferDesc.Height = height;
sd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
sd.BufferDesc.RefreshRate.Numerator = 60;
sd.BufferDesc.RefreshRate.Denominator = 1;
sd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT;
sd.OutputWindow = *hwnd;
sd.SampleDesc.Count = sampleCount;
sd.SampleDesc.Quality = maxQualityLevel;
sd.Windowed = false;
sd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // allow full-screen switching
//based on http://stackoverflow.com/questions/27270504/directx-creating-the-swapchain
IDXGIDevice * dxgiDevice = 0;
HRESULT hr = device->QueryInterface(__uuidof(IDXGIDevice), (void **)& dxgiDevice);
ERROR_HANDLE(SUCCEEDED(hr), L"Query for IDXGIDevice failed.", MOD_GRAPHIC);
IDXGIAdapter * dxgiAdapter = 0;
hr = dxgiDevice->GetParent(__uuidof(IDXGIAdapter), (void **)& dxgiAdapter);
ERROR_HANDLE(SUCCEEDED(hr), L"Could not get IDXGIAdapter.", MOD_GRAPHIC);
IDXGIFactory * dxgiFactory = 0;
hr = dxgiAdapter->GetParent(__uuidof(IDXGIFactory), (void **)& dxgiFactory);
ERROR_HANDLE(SUCCEEDED(hr), L"Could not get IDXGIFactory.", MOD_GRAPHIC);
// This system only has DirectX 11.0 installed (let's assume it)
result = dxgiFactory->CreateSwapChain(device, &sd, &swapChain);
LOG(logDEBUG1, "This system only has DirectX 11.0 installed. CreateSwapChain(...) used.", MOD_GRAPHIC);
ERROR_HANDLE(result == S_OK, L"Could not create swap chain.", MOD_GRAPHIC);
My ERROR_HANDLE(...) macro never triggers (its first parameter is true in all cases), and the log says the driver type is D3D_DRIVER_TYPE_HARDWARE.
The DirectX debug layer reports the following (which indicates some problem, but I don't think it's the reason CheckMultisampleQualityLevels(...) gives me wrong results):
DXGI WARNING: IDXGISwapChain::Present: Fullscreen presentation inefficiencies incurred due to application not using IDXGISwapChain::ResizeBuffers appropriately, specifying a DXGI_MODE_DESC not available in IDXGIOutput::GetDisplayModeList, or not using DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH.DXGI_SWAP_CHAIN_DESC::BufferDesc = { 1600, 900, { 60, 1 }, R8G8B8A8_UNORM, 0, 0 }; DXGI_SWAP_CHAIN_DESC::SampleDesc = { 8, 0 }; DXGI_SWAP_CHAIN_DESC::Flags = 0x2; [ MISCELLANEOUS WARNING #98: ]
Your code subtracts 1 from maxQualityLevel and only then checks whether it's still greater than zero, so a returned count of 1 (which means it's valid to create the target at quality level 0) gets treated as unsupported.
Assuming you want this to work across vendors, you only really need to check that the returned count is > 0 and then just create the surface at Quality = 0.
Quality levels > 0 are vendor-specific and can mean different things on different GPUs. Nvidia's CSAA and AMD's EQAA are both exposed through non-zero quality levels, but you'd need to look at the vendors' own documentation to figure out what each quality level actually means, and they're functionally slightly different from traditional MSAA. "Quality" is a little misleading in that a greater number doesn't necessarily mean greater quality; it would be more appropriate to call it "Mode".
See both:
http://www.nvidia.com/object/coverage-sampled-aa.html
and
http://developer.amd.com/wordpress/media/2012/10/EQAA%20Modes%20for%20AMD%20HD%206900%20Series%20Cards.pdf
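In code, the cross-vendor approach described above amounts to something like this sketch (using the device and sd from the question):
// Pick the highest supported sample count for the format and use Quality = 0,
// which every vendor treats as plain MSAA.
UINT sampleCount = 1;
for (UINT count = D3D11_MAX_MULTISAMPLE_SAMPLE_COUNT; count > 1; count /= 2)
{
UINT qualityLevels = 0;
HRESULT hr = device->CheckMultisampleQualityLevels(DXGI_FORMAT_R8G8B8A8_UNORM, count, &qualityLevels);
if (SUCCEEDED(hr) && qualityLevels > 0)
{
sampleCount = count;
break;
}
}
sd.SampleDesc.Count = sampleCount;
sd.SampleDesc.Quality = 0; // non-zero values select vendor-specific modes (CSAA/EQAA)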
I'm trying to create a D3D 2D texture with STAGING usage.
It always fails with the error "Incorrect parameter" (code 0x80070057, i.e. E_INVALIDARG)...
I don't understand it: I can create textures with any usage other than staging without a problem, but I can't get this one to succeed... Please help me before my computer goes flying through the window... Please...
Here is the problematic piece of code:
int w = 128;
int h = 128;
ID3D11Texture2D * tex;
D3D11_TEXTURE2D_DESC tdesc;
D3D11_SUBRESOURCE_DATA tbsd;
ZeroMemory(&tdesc, sizeof(D3D11_TEXTURE2D_DESC));
WORD *buf = new WORD[128*128];
for (int i = 0; i < h; i++)
for (int j = 0; j < w; j++)
{
buf[i*128 + j] = (WORD) 0xffffffff;
}
tbsd.pSysMem = (void *)buf;
tbsd.SysMemPitch = w * 4;
tbsd.SysMemSlicePitch = w * h * 4;
tdesc.Width = w;
tdesc.Height = h;
tdesc.MipLevels = 1;
tdesc.ArraySize = 1;
tdesc.SampleDesc.Count = 1;
tdesc.SampleDesc.Quality = 0;
tdesc.Usage = D3D11_USAGE_STAGING;
tdesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
tdesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
tdesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ;
// tdesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE; // Doesn't work either...
tdesc.MiscFlags = 0;
HRESULT eblah = 0; char tmpstr[128];
eblah = device->CreateTexture2D(&tdesc, &tbsd, &tex);
if (FAILED(eblah))
{
wsprintfA(tmpstr, "Error code : %lX", eblah); OutputDebugStringA(tmpstr);
ErrorMessage(eblah);
}
Earlier in the code I create the device like this, in case it matters:
ID3D11Device* device = nullptr;
D3D_FEATURE_LEVEL level;
ID3D11DeviceContext* context = nullptr;
HRESULT hr = D3D11CreateDevice(
nullptr,
D3D_DRIVER_TYPE::D3D_DRIVER_TYPE_NULL, // Tried D3D_DRIVER_TYPE_HARDWARE too
nullptr,
0,
nullptr,
0,
D3D11_SDK_VERSION,
&device,
&level,
&context
);
Found it:
- I used the D3D11_CREATE_DEVICE_DEBUG flag when creating the device, which gave me very useful debug info. A must-have for every beginner!
- tdesc.BindFlags MUST be 0 when creating an ID3D11Texture2D for staging (a corrected sketch is below).
- My computer didn't fly through the window!
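Here is the corrected piece, for anyone hitting the same error (note the initialization buffer should also be 4 bytes per pixel for DXGI_FORMAT_R8G8B8A8_UNORM; my original WORD buffer only provided 2):
DWORD *buf = new DWORD[128 * 128]; // 4 bytes per pixel
for (int i = 0; i < 128 * 128; i++)
buf[i] = 0xffffffff;
D3D11_SUBRESOURCE_DATA tbsd = {};
tbsd.pSysMem = (void *)buf;
tbsd.SysMemPitch = 128 * 4;
D3D11_TEXTURE2D_DESC tdesc = {};
tdesc.Width = 128;
tdesc.Height = 128;
tdesc.MipLevels = 1;
tdesc.ArraySize = 1;
tdesc.SampleDesc.Count = 1;
tdesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
tdesc.Usage = D3D11_USAGE_STAGING;
tdesc.BindFlags = 0; // MUST be 0 for staging
tdesc.CPUAccessFlags = D3D11_CPU_ACCESS_READ | D3D11_CPU_ACCESS_WRITE;
HRESULT hr = device->CreateTexture2D(&tdesc, &tbsd, &tex);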
Nice day to everyone :)