syntax error: identifier 'TEXTMETRICA' in D3DX10Core - c++

I have just started learning D3D and my code was working fine until I added the D3D shader compiler calls.
I am using the tutorial on DirectXTutorials; if I just copy-paste the code from there into a new project, the program compiles fine.
However, I have put my code into different classes, unlike the tutorial, and now compiling fails with: syntax error: identifier 'TEXTMETRICA' (compiling Direct3DRenderer.cpp).
Here is the Direct3DRenderer File:
#include "Window.h"
#include "Direct3DRenderer.h"
#include "Vertex.h"
Renderer::Renderer(HWND hw)
{
OutputDebugString("Direct3D Initializing\n");
DXGI_SWAP_CHAIN_DESC scd;
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC)); // ZERO OUT SCD
scd.BufferCount = 1; // HOW MANY BACKBUFFERS WE WANT
scd.OutputWindow = hw; // HANDLE TO THE OUTPUT WINDOW
scd.Windowed = true; // SHOULD WINDOW BE IN WINDOWED MODE BY DEFAULT
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // BUFFER FORMAT
scd.BufferDesc.Width = 800; // BUFFER WIDTH
scd.BufferDesc.Height = 600; // BUFFER HEIGHT
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // USE SWAP CHAIN AS OUTPUT TARGET
scd.SampleDesc.Count = 4; // MSAA COUNT
scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH; // FLAGS
if (D3D11CreateDeviceAndSwapChain(
NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
NULL,
NULL,
NULL,
D3D11_SDK_VERSION,
&scd,
&swapchain,
&dev,
NULL,
&context
) == SEVERITY_SUCCESS)
{
OutputDebugString("SUCCESS\n");
// Get The Address of BackBuffer
ID3D11Texture2D* pbuffer;
swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&pbuffer);
// Create a Render Target COM Object from the buffer
dev->CreateRenderTargetView(pbuffer, NULL, &RenderTarget);
pbuffer->Release();
// Set Our RenderTarget as the back buffer
context->OMSetRenderTargets(1, &RenderTarget, NULL);
// Create Our Viewport
viewport.Width = 800;
viewport.Height = 600;
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
context->RSSetViewports(1, &viewport);
InitPipeline();
InitGraphics();
}
else
{
OutputDebugString("ERROR\n");
}
}
Renderer::~Renderer()
{
OutputDebugString("Direct3D Cleanup Phase Started.\n");
swapchain->SetFullscreenState(FALSE, NULL);
swapchain->Release();
context->Release();
RenderTarget->Release();
VS->Release();
PS->Release();
dev->Release();
OutputDebugString("Direct3D Cleanup Phase Completed.\n");
}
void Renderer::InitPipeline()
{
// Compile Shaders from file
D3DX11CompileFromFile("shaders.shader", 0, 0, "VShader", "vs_4_0", 0, 0, 0, &compiled_vs, 0, 0);
D3DX11CompileFromFile("shaders.shader", 0, 0, "PShader", "ps_4_0", 0, 0, 0, &compiled_ps, 0, 0);
// Convert Compiled Shaders to COM Shader Objects
dev->CreateVertexShader(compiled_vs->GetBufferPointer(), compiled_vs->GetBufferSize(), NULL, &VS);
dev->CreatePixelShader(compiled_ps->GetBufferPointer(), compiled_ps->GetBufferSize(), NULL, &PS);
// Sets the shaders to the device / Activates the shader
context->VSSetShader(VS, 0, 0);
context->PSSetShader(PS, 0, 0);
// Create the Input Layout
D3D11_INPUT_ELEMENT_DESC VertexElementDesc[] = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
dev->CreateInputLayout(VertexElementDesc, 2, compiled_vs->GetBufferPointer(), compiled_vs->GetBufferSize(), &InputLayout);
context->IASetInputLayout(InputLayout);
}
void Renderer::InitGraphics() {
// Create Buffer so we can duplicate data from system memory to graphics memory
ZeroMemory(&VBufferDesc, sizeof(VBufferDesc));
VBufferDesc.ByteWidth = sizeof(Vertex) * 3;
VBufferDesc.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
VBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
VBufferDesc.Usage = D3D11_USAGE_DYNAMIC;
dev->CreateBuffer(&VBufferDesc, NULL, &VBuffer);
Vertex OurVertices[] =
{
{0.0f, 0.5f, 0.0f, D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f)},
{0.45f, -0.5, 0.0f, D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f)},
{-0.45f, -0.5f, 0.0f, D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f)}
};
// we need to map to avoid issues
D3D11_MAPPED_SUBRESOURCE mapRes;
context->Map(VBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &mapRes);
memcpy(mapRes.pData, OurVertices, sizeof(OurVertices));
context->Unmap(VBuffer, NULL);
}
void Renderer::RenderFrame()
{
context->ClearRenderTargetView(RenderTarget, D3DXCOLOR(0.2, 0.4, 0.6, 1.0));
// We can do the rendering here
UINT stride = sizeof(Vertex);
UINT offset = 0;
context->IASetVertexBuffers(0, 1, &VBuffer, &stride, &offset);
context->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
context->Draw(3, 0);
// Swap Buffer
swapchain->Present(0, 0);
}
The TEXTMETRICA identifier error actually comes from D3DX10Core.h; I peeked at the definition and that file seems to be what triggers it. The identifier should be defined in the GDI headers (wingdi.h).
I have added the include and lib paths for the DirectX SDK (June 2010) and also tried specifying d3dx10.lib, d3dx11.lib and d3d11.lib in the project's Additional Dependencies for all configurations. I am new to this, so I don't know what I am doing wrong. If any more code is required, please comment.

Keep in mind that most of the Internet tutorials for DirectX 11 are a bit outdated. In particular, you don't actually need the legacy DirectX SDK. See this blog post, this one, and this one.
If using VS 2015/2017/2019 and you still want to use the legacy DirectX SDK, then you have to set up the include/lib paths in a particular way or you'll get problems. The details are on Microsoft Docs.
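Roughly, the documented setup is to list the legacy SDK paths after the standard ones in VC++ Directories, so the newer Windows SDK headers and libraries win (a sketch of the settings, not copied from your project):
Include Directories: $(IncludePath);$(DXSDK_DIR)Include
Library Directories (Win32): $(LibraryPath);$(DXSDK_DIR)Lib\x86
Library Directories (x64): $(LibraryPath);$(DXSDK_DIR)Lib\x64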
You are welcome to use these older tutorials with some caveats, but you should also take a look at DirectX Tool Kit for some more 'modern' tutorials.
UPDATE: One other option for using the legacy D3DX9, D3DX10, and D3DX11 utility libraries is to use the Microsoft.DXSDK.D3DX NuGet package. This removes many of the quirks around mixing it with the modern Windows SDK and Visual C++ toolsets. It also includes the redistributable binaries with a simple side-by-side license instead of having to use legacy DXSETUP. That said, the binaries themselves are still quite ancient, have known bugs, and are no longer supported, so YMMV. See this blog post.
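For what it's worth, the only D3DX pieces the code in the question really relies on are the shader compiler and D3DXCOLOR, and the compiler part has a drop-in replacement in the Windows SDK itself. A sketch of InitPipeline without D3DX, assuming compiled_vs and compiled_ps are ID3DBlob* members and the rest of the class stays the same:
#include <d3dcompiler.h>
#pragma comment(lib, "d3dcompiler.lib")
void Renderer::InitPipeline()
{
    // D3DCompileFromFile ships with the Windows SDK, so no legacy DirectX SDK is needed.
    ID3DBlob* errors = nullptr;
    D3DCompileFromFile(L"shaders.shader", nullptr, nullptr, "VShader", "vs_4_0", 0, 0, &compiled_vs, &errors);
    D3DCompileFromFile(L"shaders.shader", nullptr, nullptr, "PShader", "ps_4_0", 0, 0, &compiled_ps, &errors);
    dev->CreateVertexShader(compiled_vs->GetBufferPointer(), compiled_vs->GetBufferSize(), nullptr, &VS);
    dev->CreatePixelShader(compiled_ps->GetBufferPointer(), compiled_ps->GetBufferSize(), nullptr, &PS);
    context->VSSetShader(VS, 0, 0);
    context->PSSetShader(PS, 0, 0);
    // Input layout creation is unchanged from the code in the question.
}
The D3DXCOLOR values used for clearing and in the vertex data can then be replaced with plain float arrays/structs, since ClearRenderTargetView just takes a FLOAT[4].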

Related

DirectX 12 doesn't draw triangle

I'm learning DirectX 12. I would like to draw a triangle, but DirectX 12 doesn't draw it. I wonder if there's something I forgot.
This is the Init() part.
void Engine::Init() {
mDevice = new Device();
mCommandQueue = new CommandQueue();
mSwapChain = new SwapChain();
mDescriptorHeap = new DescriptorHeap();
mRootSignature = new RootSignature();
mDevice->Init();
mCommandQueue->Init(mDevice->GetDevice());
mSwapChain->Init(mDevice->GetDevice(), mDevice->GetFactory(), mCommandQueue->GetCommandQueue(), mWndInfo);
mDescriptorHeap->Init(mDevice->GetDevice(), mSwapChain);
mRootSignature->Init(mDevice->GetDevice());
mesh = new Mesh();
shader = new Shader(mRootSignature);
D3D12_INPUT_ELEMENT_DESC desc[] = {
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D12_INPUT_CLASSIFICATION_PER_VERTEX_DATA, 0}
};
shader->Init(mDevice->GetDevice(), L"..\\Resources\\Shader\\default.hlsli");
shader->SetPipelineState(mDevice->GetDevice(), desc, _countof(desc));
std::vector<Vertex> vec(3);
vec[0].pos = Vec3(0.0f, 0.5f, 0.5f);
vec[0].color = Vec4(1.0f, 0.0f, 0.0f, 1.0f);
vec[1].pos = Vec3(0.5f, -0.5f, 0.5f);
vec[1].color = Vec4(0.0f, 1.0f, 0.0f, 1.0f);
vec[2].pos = Vec3(-0.5f, -0.5f, 0.5f);
vec[2].color = Vec4(0.0f, 0.0f, 1.0f, 1.0f);
mesh->Init(mDevice->GetDevice(), vec);
mCommandQueue->WaitForPreviousFrame(mSwapChain);
}
void Mesh::Init(ID3D12Device* const device, const std::vector<Vertex>& vec) {
mVertexCount = static_cast<uint32>(vec.size());
const uint32 bufferSize = mVertexCount * sizeof(Vertex);
D3D12_HEAP_PROPERTIES heapProperties = CD3DX12_HEAP_PROPERTIES(D3D12_HEAP_TYPE_UPLOAD);
D3D12_RESOURCE_DESC resourceDesc = CD3DX12_RESOURCE_DESC::Buffer(bufferSize);
ThrowIfFailed(device->CreateCommittedResource(
&heapProperties,
D3D12_HEAP_FLAG_NONE,
&resourceDesc,
D3D12_RESOURCE_STATE_GENERIC_READ,
nullptr,
IID_PPV_ARGS(&mVertexBuffer)));
// Copy the triangle data to the vertex buffer.
void* pVertexDataBuffer = nullptr;
CD3DX12_RANGE readRange(0, 0); // We do not intend to read from this resource on the CPU.
ThrowIfFailed(mVertexBuffer->Map(0, &readRange, &pVertexDataBuffer));
std::memcpy(pVertexDataBuffer, &vec[0], bufferSize);
mVertexBuffer->Unmap(0, nullptr);
// Initialize the vertex buffer view.
mVertexBufferView.BufferLocation = mVertexBuffer->GetGPUVirtualAddress();
mVertexBufferView.StrideInBytes = sizeof(Vertex);
mVertexBufferView.SizeInBytes = bufferSize;
}
After Init, this code will be executed.
void Engine::Render() {
mCommandQueue->RenderBegin(mSwapChain, mDescriptorHeap, shader->GetGraphicsPSO(), &mViewport, &mScissorRect);
mesh->Render(mCommandQueue->GetCommandList());
mCommandQueue->RenderEnd(mSwapChain);
}
void CommandQueue::RenderBegin(SwapChain* swapChain, DescriptorHeap* descHeap, GraphicsPSO* graphicsPSO, const D3D12_VIEWPORT* viewPort, const D3D12_RECT* scissorRect) {
ThrowIfFailed(mCommandAllocator->Reset());
ThrowIfFailed(mCommandList->Reset(mCommandAllocator, graphicsPSO->GetPipelineState()));
mCommandList->SetGraphicsRootSignature(graphicsPSO->GetRootSignatrue()->GetRootSignature());
mCommandList->RSSetViewports(1, viewPort);
mCommandList->RSSetScissorRects(1, scissorRect);
// Indicate that the back buffer will be used as a render target.
D3D12_RESOURCE_BARRIER resourceBarrierDesc = CD3DX12_RESOURCE_BARRIER::Transition(swapChain->GetBackRtvBuffer(),
D3D12_RESOURCE_STATE_PRESENT,
D3D12_RESOURCE_STATE_RENDER_TARGET);
mCommandList->ResourceBarrier(1, &resourceBarrierDesc);
D3D12_CPU_DESCRIPTOR_HANDLE rtvHandle = descHeap->GetBackRtvHandle();
const float clearColor[] = { 0.0f, 0.2f, 0.4f, 1.0f };
mCommandList->OMSetRenderTargets(1, &rtvHandle, FALSE, nullptr);
mCommandList->ClearRenderTargetView(rtvHandle, clearColor, 0, nullptr);
}
void Mesh::Render(ID3D12GraphicsCommandList* cmdList) {
cmdList->IASetPrimitiveTopology(D3D_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
cmdList->IASetVertexBuffers(0, 1, &mVertexBufferView);
cmdList->DrawInstanced(mVertexCount, 1, 0, 0);
}
void CommandQueue::RenderEnd(SwapChain* swapChain) {
// Indicate that the back buffer will now be used to present.
D3D12_RESOURCE_BARRIER resourceBarrierDesc = CD3DX12_RESOURCE_BARRIER::Transition(swapChain->GetBackRtvBuffer(),
D3D12_RESOURCE_STATE_RENDER_TARGET,
D3D12_RESOURCE_STATE_PRESENT);
mCommandList->ResourceBarrier(1, &resourceBarrierDesc);
ThrowIfFailed(mCommandList->Close());
// Execute the command list.
ID3D12CommandList* ppCommandLists[] = { mCommandList };
mCommandQueue->ExecuteCommandLists(_countof(ppCommandLists), ppCommandLists);
// Back buffer <-> Front buffer
swapChain->Present();
WaitForPreviousFrame(swapChain);
swapChain->SwapIndex();
}
Result is like this: [screenshot: the window shows only the clear color, no triangle].
There's no error. I compared my code with the DirectX sample code "Hello Triangle", and I do issue most of the DirectX rendering calls myself, such as OMSetRenderTargets, ResourceBarrier, etc.
Windows 10, GTX 750 Ti, which supports DirectX 12.
I figured out the reason myself. It is because I got the initialization of the pipeline state wrong.
ID3D12PipelineState* pipelineState = mGraphicsPSO->GetPipelineState();
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&pipelineState)));
This was my code for creating the graphics pipeline state. When I wrote the following code instead, I got an error:
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mGraphicsPSO->GetPipelineState())));
I wanted to write it like the above, but it gives the compile error "C++ expression must be an lvalue or a function designator", because the return value of mGraphicsPSO->GetPipelineState() is an rvalue.
So I thought it might work if I wrote it like the original code (the first code). Unfortunately, that doesn't give a compile error; it "works".
The problem with the first code is that the pipeline state pointer inside GraphicsPSO is still nullptr, because CreateGraphicsPipelineState writes the created pipeline state into the local "pipelineState" variable, which is not a member of GraphicsPSO.
And that is when I wrote this question on Stack Overflow.
Anyway, I suspected the first code and wrote test code:
D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = mGraphicsPSO->GetGraphicsPipelineStateDesc();
ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mGraphicsPSO->mPipelineState)));
I moved mPipelineState from protected to public and wrote the code above. It works well.
This is the result: [screenshot: the triangle now renders].
I found the fundamental reason for the error, but that doesn't mean it's finished. I still need to think about how to fix my first code properly.
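One way to avoid the copied-pointer trap entirely (a sketch, not the poster's code; it assumes GraphicsPSO owns mPipelineState and already builds the desc) is to let GraphicsPSO create its own pipeline state, so no caller ever receives a pointer by value just to fill it in:
// Hypothetical member function on GraphicsPSO.
void GraphicsPSO::Create(ID3D12Device* device)
{
    D3D12_GRAPHICS_PIPELINE_STATE_DESC desc = GetGraphicsPipelineStateDesc();
    // Write straight into the member, so the object really ends up owning the PSO.
    ThrowIfFailed(device->CreateGraphicsPipelineState(&desc, IID_PPV_ARGS(&mPipelineState)));
}
// Caller side: mGraphicsPSO->Create(mDevice->GetDevice());
Alternatively, GetPipelineState() could return a reference to the member pointer (ID3D12PipelineState*&), so IID_PPV_ARGS has an lvalue to bind to.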

Directx11 drawing to wrong render target

I'm attempting to render to a texture for the purpose of shadow mapping in DirectX11. I've set up and bound a separate render target to draw to. Problem is, after calling OMSetRenderTargets it's still rendering to the previously bound render target.
The graphics diagnostics event list shows that OMSetRenderTargets is being called, setting "obj:30" as the render target view. However, the following DrawIndexed call shows the render target as "obj:17", which is the previously bound render target.
[Screenshots: Event List, Draw Call]
I have the DirectX debug layer enabled, however it does not show any errors or warning messages. I've also ensured that the texture is not bound as a shader resource when the draw call happens but no luck there either.
These are both called by the following function
void GraphicsHandler::DrawSceneToRenderTarget(ID3D11RenderTargetView* RenderTarget, ID3D11VertexShader* WithVertexShader, ID3D11PixelShader* WithPixelShader)
{
const unsigned int VertexSize = sizeof(Vertex);
const unsigned int Offset = 0;
DeviceContext->ClearDepthStencilView(DepthStencilView, D3D11_CLEAR_DEPTH | D3D11_CLEAR_STENCIL, 1.0f, 0.0f);
DeviceContext->VSSetShader(WithVertexShader, nullptr, 0);
DeviceContext->PSSetShader(WithPixelShader, nullptr, 0);
DeviceContext->OMSetRenderTargets(1, &RenderTarget, DepthStencilView); //Render target set here
for (auto& Obj : ActiveScene.Objects)
{
ObjectInfo ObjectData;
ObjectData.ObjectTransform = XMMatrixIdentity();
ObjectData.ObjectTransform *= XMMatrixRotationRollPitchYaw(Obj->Rotator.X, Obj->Rotator.Y, Obj->Rotator.Z);
ObjectData.ObjectTransform *= XMMatrixTranslation(Obj->Position.X, Obj->Position.Y, Obj->Position.Z);
ObjectData.ObjectTransform *= XMMatrixScaling(Obj->Scale.X, Obj->Scale.Y, Obj->Scale.Z);
ObjectData.NormalMatrix = XMMatrixTranspose(XMMatrixInverse(nullptr, ObjectData.ObjectTransform));
DeviceContext->UpdateSubresource(ObjectBuffer, 0, nullptr, &ObjectData, 0, 0);
DeviceContext->UpdateSubresource(MaterialBuffer, 0, nullptr, &Obj->Mat, 0, 0);
DeviceContext->IASetVertexBuffers(0, 1, &Obj->VertexBuffer, &VertexSize, &Offset);
DeviceContext->IASetIndexBuffer(Obj->IndexBuffer, DXGI_FORMAT_R16_UINT, 0);
DeviceContext->VSSetConstantBuffers(0, 1, &ObjectBuffer);
//DeviceContext->PSSetConstantBuffers(0, 1, &MaterialBuffer);
DeviceContext->DrawIndexed(Obj->Indices.size(), 0, 0); //Draw called here
}
}
The problematic calls to it are in the following two functions:
void GraphicsHandler::RenderSceneDepth()
{
DeviceContext->RSSetState(RasterizerState);
DeviceContext->PSSetShaderResources(0, 1, &SceneDepthSRV);
DeviceContext->UpdateSubresource(CameraBuffer, 0, nullptr, &ActiveScene.SceneCamera.GetCameraVSInfo(), 0, 0);
DeviceContext->VSSetConstantBuffers(1, 1, &CameraBuffer);
DeviceContext->ClearRenderTargetView(SceneDepthRTV, Colors::Black);
DrawSceneToRenderTarget(SceneDepthRTV, VertexShader, DepthShader);
}
void GraphicsHandler::RenderShadowMap(ShadowMap& SM)
{
//Clear shader resources, as the texture can't be bound as input and output
ID3D11ShaderResourceView* NullResources[2] = { nullptr, nullptr };
DeviceContext->PSSetShaderResources(0, 2, NullResources);
DeviceContext->RSSetState(SMRasterizerState); //Need to render back faces only
ID3D11SamplerState* Samplers[2] = { SamplerState, ShadowSamplerState };
DeviceContext->PSSetSamplers(0, 2, Samplers);
//If the light is a directional source, render a directional shadow map
DirectionalLight* DirLight = nullptr;
DirLight = dynamic_cast<DirectionalLight*>(SM.ParentLight);
if (DirLight)
{
ID3D11RenderTargetView* RTV = SM.RTVs[0];
SM.LightPovCamera.ForwardDirection = DirLight->Direction;
DeviceContext->ClearRenderTargetView(RTV, Colors::Black);
DeviceContext->UpdateSubresource(LightPovBuffer, 0, nullptr, &SM.LightPovCamera.GetCameraVSInfo(), 0, 0);
DeviceContext->VSSetConstantBuffers(1, 1, &LightPovBuffer);
DrawSceneToRenderTarget(RTV, VertexShader, DepthShader);
}
//Otherwise, render to each face of the texturecube
else
{
for (int N = 0; N < 6; N++)
{
DeviceContext->ClearRenderTargetView(SM.RTVs[N], Colors::Black);
Camera POVCam = SM.GetCameraForCubemapFace(N);
DeviceContext->UpdateSubresource(LightPovBuffer, 0, nullptr, &POVCam.GetCameraVSInfo(), 0, 0);
DeviceContext->VSSetConstantBuffers(1, 1, &LightPovBuffer);
DrawSceneToRenderTarget(SM.RTVs[N], VertexShader, DepthShader);
}
}
}
Whoops, my mistake: the debug layer actually wasn't enabled. The error was caused by the render target having different dimensions from the depth stencil view. Apologies!
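For reference, the debug layer is turned on with a device-creation flag, and the depth stencil texture has to match the size (and sample count) of the render target it is bound with. A minimal sketch, with the variable names being assumptions rather than the poster's:
// Enable the debug layer so mismatches like this get reported at runtime.
UINT flags = D3D11_CREATE_DEVICE_DEBUG;
D3D11CreateDevice(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, flags,
                  nullptr, 0, D3D11_SDK_VERSION, &Device, nullptr, &DeviceContext);
// The depth texture used with the shadow-map RTV must share its dimensions.
D3D11_TEXTURE2D_DESC depthDesc = {};
depthDesc.Width = ShadowMapWidth;    // must equal the shadow map render target width
depthDesc.Height = ShadowMapHeight;  // must equal the shadow map render target height
depthDesc.MipLevels = 1;
depthDesc.ArraySize = 1;
depthDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthDesc.SampleDesc.Count = 1;
depthDesc.BindFlags = D3D11_BIND_DEPTH_STENCIL;
Device->CreateTexture2D(&depthDesc, nullptr, &ShadowDepthTexture);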

Im trying to use OpenGL with the windows API on different threads

So basically I am using the Windows API to create an empty window, and then I use OpenGL to draw to that window from a different thread. I managed to do this with just one thread, but getting and dispatching system messages so that the window stays usable was slowing down the frame rate I was able to get, so I'm trying to have another thread do that in parallel while I draw in the main thread.
To do this I have a second thread which creates an empty window and enters an infinite loop to handle the Windows message loop. Before entering the loop it passes the HWND of the empty window to the main thread so OpenGL can be initialised there. To do that I use the PostThreadMessage function with the message code WM_USER and the wParam of the message carrying the window handle. Here is the code for that secondary thread:
bool t2main(DWORD parentThreadId, int x = 0, int y = 0, int w = 256, int h = 256, int pixelw = 2, int pixelh = 2, const char* windowName = "Window") {
// Basic drawing values
int sw = w, sh = h, pw = pixelw, ph = pixelh;
int ww = 0; int wh = 0;
// Windows API window handler
HWND windowHandler;
// Calculate total window dimensions
ww = sw * pw; wh = sh * ph;
// Create the window handler
WNDCLASS wc;
wc.hIcon = LoadIcon(NULL, IDI_APPLICATION);
wc.hCursor = LoadCursor(NULL, IDC_ARROW);
wc.style = CS_HREDRAW | CS_VREDRAW | CS_OWNDC;
wc.hInstance = GetModuleHandle(nullptr);
wc.lpfnWndProc = DefWindowProc;
wc.cbClsExtra = 0;
wc.cbWndExtra = 0;
wc.lpszMenuName = nullptr;
wc.hbrBackground = nullptr;
wc.lpszClassName = "windowclass";
RegisterClass(&wc);
DWORD dwExStyle = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE;
DWORD dwStyle = WS_CAPTION | WS_SYSMENU | WS_VISIBLE | WS_THICKFRAME;
RECT rWndRect = { 0, 0, ww, wh };
AdjustWindowRectEx(&rWndRect, dwStyle, FALSE, dwExStyle);
int width = rWndRect.right - rWndRect.left;
int height = rWndRect.bottom - rWndRect.top;
windowHandler = CreateWindowEx(dwExStyle, "windowclass", windowName, dwStyle, x, y, width, height, NULL, NULL, GetModuleHandle(nullptr), NULL);
if(windowHandler == NULL) { return false; }
PostThreadMessageA(parentThreadId, WM_USER, (WPARAM) windowHandler, 0);
for(;;) {
MSG msg;
PeekMessageA(&msg, NULL, 0, 0, PM_REMOVE);
DispatchMessageA(&msg);
}
}
This function gets called from the main entry point, which correctly receives the window handle and then tries to set up OpenGL with it. Here is the code:
int main() {
// Basic drawing values
int sw = 256, sh = 256, pw = 2, ph = 2;
int ww = 0; int wh = 0;
const char* windowName = "Window";
// Thread stuff
DWORD t1Id, t2Id;
HANDLE t1Handler, t2Handler;
// Pixel array
Pixel* pixelBuffer = nullptr;
// OpenGl device context to draw
HDC glDeviceContext;
HWND threadWindowHandler;
t1Id = GetCurrentThreadId();
std::thread t = std::thread(&t2main, t1Id, 0, 0, sw, sh, pw, ph, windowName);
t.detach();
t2Handler = t.native_handle();
t2Id = GetThreadId(t2Handler);
while(true) {
MSG msg;
PeekMessageA(&msg, NULL, WM_USER, WM_USER + 100, PM_REMOVE);
if(msg.message == WM_USER) {
threadWindowHandler = (HWND) msg.wParam;
break;
}
}
// Initialise OpenGL with the window handle that we just created
glDeviceContext = GetDC(threadWindowHandler);
PIXELFORMATDESCRIPTOR pfd = {
sizeof(PIXELFORMATDESCRIPTOR), 1,
PFD_DRAW_TO_WINDOW | PFD_SUPPORT_OPENGL | PFD_DOUBLEBUFFER,
PFD_TYPE_RGBA, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
PFD_MAIN_PLANE, 0, 0, 0, 0
};
int pf = ChoosePixelFormat(glDeviceContext, &pfd);
SetPixelFormat(glDeviceContext, pf, &pfd);
HGLRC glRenderContext = wglCreateContext(glDeviceContext);
wglMakeCurrent(glDeviceContext, glRenderContext);
// Create an OpenGl buffer
GLuint glBuffer;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glBuffer);
glBindTexture(GL_TEXTURE_2D, glBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// Create a pixel buffer to hold the screen data and allocate space for it
pixelBuffer = new Pixel[sw * sh];
for(int32_t i = 0; i < sw * sh; i++) {
pixelBuffer[i] = Pixel();
}
// Test a pixel
pixelBuffer[10 * sw + 10] = Pixel(255, 255, 255);
// Push the current buffer into view
glViewport(0, 0, ww, wh);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sw, sh, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelBuffer);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0f, -1.0f, 0.0f);
glEnd();
SwapBuffers(glDeviceContext);
for(;;) {}
}
To hold the pixel information I'm using this struct:
struct Pixel {
union {
uint32_t n = 0xFF000000; //Default 255 alpha
struct {
uint8_t r; uint8_t g; uint8_t b; uint8_t a;
};
};
Pixel() {
r = 0;
g = 0;
b = 0;
a = 255;
}
Pixel(uint8_t red, uint8_t green, uint8_t blue, uint8_t alpha = 255) {
r = red;
g = green;
b = blue;
a = alpha;
}
};
When I try to run this code I don't get the desired pixel output; instead I just get the empty window, as if OpenGL hadn't initialised correctly. When I use the same code but all in one thread, I get the empty window with the pixel in it. What am I doing wrong here? Is there something I need to do before I initialise OpenGL in another thread? I appreciate all kinds of feedback. Thanks in advance.
There are several issues here. Let's address them in order.
First let's recall the rules of:
OpenGL and threads
The basic rules about OpenGL with regard to windows, device context and threads are:
An OpenGL context is not associated with a particular window or device context.
You can make an OpenGL context "current" on any device context (HDC, usually associated with a window) that is compatible with the device context the OpenGL context was originally created with.
An OpenGL context can be "current" on only one thread at a time, or not be active at all.
To move an OpenGL context's "current state" from one thread to another you do the following (see the sketch after this list):
first: unmake the context "current" on the thread it's currently used on
second: make it "current" on the thread you want it to be current on
More than one (including all) threads in a process can have an OpenGL context "current" at the same time.
Multiple OpenGL contexts (including all), which by rule 5 will be current in different threads, can be current with the same device context (HDC) at the same time.
There are no defined rules for drawing commands happening concurrently on different threads, but current on the same HDC. Ordering must happen by the user, by placing appropriate locks that work with OpenGL synchronization primitives. Until the introduction of explicit, fine-grained synchronization objects into OpenGL, the only synchronization available was glFinish and the implicit synchronization point calls of OpenGL (e.g. glReadPixels).
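A minimal sketch of the hand-off described in rules 3 and 4, assuming the context and the window's device context already exist:
// Thread A: give up the context.
wglMakeCurrent(nullptr, nullptr);
// Thread B: take it over. hDC must be compatible with the DC the context
// was created against; here it is simply the same window's DC.
wglMakeCurrent(hDC, hGLRC);
// ... issue OpenGL calls on thread B from here on ...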
Misconceptions in your understanding of what OpenGL does
This comes from reading the comments in your code:
int main() {
Why is your thread function called main? main is a reserved name, to be used exclusively for the process entry function. Even if your entry point is WinMain, you must not use main as a function name.
// Pixel array
Pixel* pixelBuffer = nullptr;
It's unclear what pixelBuffer is meant for later on. You will upload it into a texture, but apparently don't set up the drawing to actually use that texture.
t1Id = GetCurrentThreadId();
std::thread t = std::thread(&t2main, t1Id, 0, 0, sw, sh, pw, ph, windowName);
t.detach();
t2Handler = t.native_handle();
t2Id = GetThreadId(t2Handler);
What, I don't even. What is this supposed to do in the first place? First things first: don't mix the Win32 threads API and C++ std::thread. Decide on one, and stick with it.
while(true) {
MSG msg;
PeekMessageA(&msg, NULL, WM_USER, WM_USER + 100, PM_REMOVE);
if(msg.message == WM_USER) {
threadWindowHandler = (HWND) msg.wParam;
break;
}
}
Why the hell are you passing the window handle through a thread message? This is so wrong on so many levels. Threads all live in the same address space, so you could use a queue, or global variables, or pass it as a parameter to the thread entry function, etc., etc.
Furthermore you could just have created the OpenGL context in the main thread and then just passed it over.
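For example, one of the simpler hand-off options (a sketch, not the original code) is a std::promise/std::future pair instead of a thread message:
#include <windows.h>
#include <future>
#include <thread>
#include <utility>
// Inside the main thread's setup code:
std::promise<HWND> windowPromise;
std::future<HWND> windowFuture = windowPromise.get_future();
std::thread messageThread([p = std::move(windowPromise)]() mutable {
    HWND hwnd = nullptr; // CreateWindowEx(...) as in t2main
    p.set_value(hwnd);
    // ... PeekMessage/DispatchMessage loop ...
});
messageThread.detach();
HWND threadWindowHandler = windowFuture.get(); // blocks until the window exists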
wglMakeCurrent(glDeviceContext, glRenderContext);
// Create an OpenGl buffer
GLuint glBuffer;
glEnable(GL_TEXTURE_2D);
glGenTextures(1, &glBuffer);
That doesn't create an OpenGL buffer object, it creates a texture name.
glBindTexture(GL_TEXTURE_2D, glBuffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
// Create a pixel buffer to hold the screen data and allocate space for it
pixelBuffer[10 * sw + 10] = Pixel(255, 255, 255);
Uhh, no, you don't supply drawable buffers to OpenGL in that way. Heck, you don't even supply draw buffers to OpenGL explicitly at all (this is not D3D12, Metal or Vulkan, where you do).
// Push the current buffer into view
glViewport(0, 0, ww, wh);
Noooo. That's not what glViewport does!
glViewport is part of the transformation pipeline state and ultimately sets the destination rectangle of where inside a drawable the clip space volume will be mapped to. It does absolutely nothing with respect to the drawable buffers.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, sw, sh, 0, GL_RGBA, GL_UNSIGNED_BYTE, pixelBuffer);
I think you don't understand what a texture is for. What this call does is copy the contents of pixelBuffer into the currently bound texture. After that, OpenGL is no longer concerned with pixelBuffer at all.
glBegin(GL_QUADS);
glTexCoord2f(0.0, 1.0); glVertex3f(-1.0f, -1.0f, 0.0f);
glTexCoord2f(0.0, 0.0); glVertex3f(-1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 0.0); glVertex3f(1.0f, 1.0f, 0.0f);
glTexCoord2f(1.0, 1.0); glVertex3f(1.0f, -1.0f, 0.0f);
glEnd();
Here you draw something, but never enabled the use of the texture in the first place. So all that ado about setting up the texture is for nothing.
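For contrast, a minimal sketch of the state that has to be in effect at draw time for the quad to actually sample the texture (using the names from the code above):
glEnable(GL_TEXTURE_2D);                 // texturing enabled in the drawing context
glBindTexture(GL_TEXTURE_2D, glBuffer);  // the texture that glTexImage2D filled
// ... then the glBegin(GL_QUADS) / glTexCoord2f / glVertex3f / glEnd() block as above ...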
SwapBuffers(glDeviceContext);
for(;;) {}
}
So after swapping the window buffers you make the thread spin forever. Two problems with that: there is still the main message loop over in the other thread that handles other messages for the window, including maybe WM_PAINT, and depending on whether you've set a background brush and/or how you handle WM_ERASEBKGND, whatever you just drew might instantly vanish thereafter.
And by spinning the thread you're consuming CPU time for no reason whatsoever. You could just as well end the thread.
I solved the problem primarily with the help of datenwolf's comment. Firstly, I used a pointer variable to pass data between threads, which removed the need for PostThreadMessageA, which was the main reason I was using the Win32 thread API in the first place. I also changed the OpenGL code a bit and finally got what I wanted.

Only glClear(..) color is displayed, nothing else rendered (CUDA/OpenGL interop)

I have a WinForms application with a panel (500x500 pixels) that I want to render something in. At this point I am just trying to fill it in with a specific color. I want to use OpenGL/CUDA interop to do this.
I got the panel configured to be the region to render stuff in, however when I run my code, the panel just gets filled with the glClear(..) color, and nothing assigned by the kernel is displayed. It sort of worked this morning (inconsistently), and in my attempt to sort out the SwapBuffers() mess, I think I screwed it up.
Here is the pixel format initialization for OpenGL. It seems to work fine, I have the two buffers as I expected, and the context is correct:
static PIXELFORMATDESCRIPTOR pfd=
{
sizeof(PIXELFORMATDESCRIPTOR), // Size Of This Pixel Format Descriptor
1, // Version Number
PFD_DRAW_TO_WINDOW | // Format Must Support Window
PFD_SUPPORT_OPENGL | // Format Must Support OpenGL
PFD_DOUBLEBUFFER, // Must Support Double Buffering
PFD_TYPE_RGBA, // Request An RGBA Format
16, // Select Our Color Depth
0, 0, 0, 0, 0, 0, // Color Bits Ignored
0, // No Alpha Buffer
0, // Shift Bit Ignored
0, // No Accumulation Buffer
0, 0, 0, 0, // Accumulation Bits Ignored
16, // 16Bit Z-Buffer (Depth Buffer)
0, // No Stencil Buffer
0, // No Auxiliary Buffer
PFD_MAIN_PLANE, // Main Drawing Layer
0, // Reserved
0, 0, 0 // Layer Masks Ignored
};
GLint iPixelFormat;
// get the device context's best, available pixel format match
if((iPixelFormat = ChoosePixelFormat(hdc, &pfd)) == 0)
{
MessageBox::Show("ChoosePixelFormat Failed");
return 0;
}
// make that match the device context's current pixel format
if(SetPixelFormat(hdc, iPixelFormat, &pfd) == FALSE)
{
MessageBox::Show("SetPixelFormat Failed");
return 0;
}
if((m_hglrc = wglCreateContext(m_hDC)) == NULL)
{
MessageBox::Show("wglCreateContext Failed");
return 0;
}
if((wglMakeCurrent(m_hDC, m_hglrc)) == NULL)
{
MessageBox::Show("wglMakeCurrent Failed");
return 0;
}
After this is done, I set up the ViewPort as such:
glViewport(0,0,iWidth,iHeight); // Reset The Current Viewport
glMatrixMode(GL_MODELVIEW); // Select The Modelview Matrix
glLoadIdentity(); // Reset The Modelview Matrix
glEnable(GL_DEPTH_TEST);
Then I set up the clear color and do a clear:
glClearColor(1.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT| GL_DEPTH_BUFFER_BIT);
Now I set up the CUDA/OpenGL interop:
cudaDeviceProp prop; int dev;
memset(&prop, 0, sizeof(cudaDeviceProp));
prop.major = 1; prop.minor = 0;
checkCudaErrors(cudaChooseDevice(&dev, &prop));
checkCudaErrors(cudaGLSetGLDevice(dev));
glBindBuffer = (PFNGLBINDBUFFERARBPROC)GET_PROC_ADDRESS("glBindBuffer");
glDeleteBuffers = (PFNGLDELETEBUFFERSARBPROC)GET_PROC_ADDRESS("glDeleteBuffers");
glGenBuffers = (PFNGLGENBUFFERSARBPROC)GET_PROC_ADDRESS("glGenBuffers");
glBufferData = (PFNGLBUFFERDATAARBPROC)GET_PROC_ADDRESS("glBufferData");
GLuint bufferID;
cudaGraphicsResource * resourceID;
glGenBuffers(1, &bufferID);
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferID);
glBufferData(GL_PIXEL_UNPACK_BUFFER_ARB, fWidth*fHeight*4, NULL, GL_DYNAMIC_DRAW_ARB);
checkCudaErrors(cudaGraphicsGLRegisterBuffer( &resourceID, bufferID, cudaGraphicsMapFlagsNone ));
Now I try to call my kernel (which just paints each pixel a specific color) and have that displayed.
uchar4* devPtr;
size_t size;
// First clear the back buffer:
glClearColor(1.0f, 0.5f, 0.0f, 0.0f); // orange
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
checkCudaErrors(cudaGraphicsMapResources(1, &resourceID, NULL));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resourceID));
animate(devPtr); // This will call the kernel and do a sync (see later)
checkCudaErrors(cudaGraphicsUnmapResources(1, &resourceID, NULL));
// Swap buffers to bring back buffer forward:
SwapBuffers(m_hDC);
At this point I expect to see the kernel colors on the screen, but no! I see orange, which is the clear color that I just set.
Here is the call to the kernel:
void animate(uchar4* dispPtr)
{
checkCudaErrors(cudaDeviceSynchronize());
animKernel<<<blocks, threads>>>(dispPtr, envdim);
checkCudaErrors(cudaDeviceSynchronize());
}
Here envdim is just the dimensions (so 500x500). The kernel itself:
__global__ void animKernel(uchar4 *optr, dim3 matdim)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * matdim.x;
if (x < matdim.x && y < matdim.y)
{
// BLACK:
optr[offset].x = 0; optr[offset].y = 0; optr[offset].z = 0;
}
}
Things I've done:
The size returned by cudaGraphicsResourceGetMappedPointer is 1000000, which corresponds to the 500x500 matrix of uchar4, so that's good.
Each kernel printed the value and location that it was writing to, and that seemed ok.
Played with the alpha value for the clear color, but that doesn't seem to do anything (yet?)
Ran the animate() function several times. Don't know why I thought that would help, but I tried it...
So I guess I'm missing something, but I'm going kind of crazy looking for it. Any advice? Help?
It's another one of those questions I answer myself! Hmph, as I figured, it was a one-line issue. The problem resides in the rendering call itself.
The configuration is fine; the one issue I have with the code above is:
I never called glDrawPixels(), which is necessary in order for the OpenGL driver to copy the shared buffer (GL_PIXEL_UNPACK_BUFFER_ARB) source to the display buffer. The correct rendering sequence is then:
uchar4* devPtr;
size_t size;
// First clear the back buffer:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
checkCudaErrors(cudaGraphicsMapResources(1, &resourceID, NULL));
checkCudaErrors(cudaGraphicsResourceGetMappedPointer((void**)&devPtr, &size, resourceID));
animate(devPtr); // This will call the kernel and do a sync (see later)
checkCudaErrors(cudaGraphicsUnmapResources(1, &resourceID, NULL));
// This is necessary to copy the shared buffer to display
glDrawPixels(fWidth, fHeight, GL_RGBA, GL_UNSIGNED_BYTE, 0);
// Swap buffers to bring back buffer forward:
SwapBuffers(m_hDC);
I'd like to thank the Acade-- uh, CUDA By Example, once again for helping me. Even though the example code from the book used GLUT (which was completely useless for this...), the book referenced normal gl functions.
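One detail worth spelling out (not from the original post): because a buffer object is bound to GL_PIXEL_UNPACK_BUFFER_ARB, the last argument to glDrawPixels is interpreted as a byte offset into that buffer rather than a CPU pointer, which is why passing 0 reads the pixels the CUDA kernel wrote:
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, bufferID);           // still bound from the setup code
glDrawPixels(fWidth, fHeight, GL_RGBA, GL_UNSIGNED_BYTE, 0);  // 0 = offset into the bound PBO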

D3D11 DrawIndexed() is drawing to the wrong render target

I'm attempting to render a scene to two textures (left and right) for use with the Oculus Rift. When I set the render target to a 2D texture render view and call DrawIndexed() it renders to the back buffer instead of the texture. I'm using Visual Studio, and I've run the Graphics Diagnostics on it. On the DrawIndexed() event, it shows the render target is the texture, but the pixel history doesn't show the event. If I don't clear the backbuffer, the scene shows up on the screen.
In the following code, the RenderLeft() function should render an image to a plane on a green background with the render target set as the left render texture. Then RenderRight() should take the texture rendered by RenderLeft(), and render it to the plane, then output that on the back buffer. (Note: This isn't the normal set up. This is just to help see if the texture is being rendered to or not)
In the final output, there should be nothing on the left side of the screen, and on the right should be the source image inside a green rectangle on a black background.
Instead, I get this: http://i.imgur.com/dHX5Ed3.png?1
RenderLeft is rendering to the back buffer, even though the render target is a texture, so then the texture used by RenderRight is just the color used to clear it.
Here is the code I'm currently using. I think I've included everything that's relevant.
// this is the function used to render a single frame
void Direct3D::RenderFrame()
{
CreateTransforms(); //this creates matFinalLeft and matFinalRight, which is (world matrix)*(view matrix)*(projection matrix) with the proper offsets for a stereoscopic view.
setVertices(); //this sets the vertex and index buffers.
setMainShaders(); // this sets the shaders used to render the 3D scene
RenderLeft(pTextureLeftRenderView, matFinalLeft, viewportLeft, true); //this renders an image to a plane on a green background. It SHOULD render to a texture.
RenderRight(backbuffer, matFinalRight, viewportRight, false);//this renders the render target from RenderLeft to the plane and renders to the back buffer.
swapchain->Present(0, 0); //output back buffer to screen.
}
This section should render a rectangle textured with an image to the left side of the render texture.
//Render the scene to the left side of a texture
void Direct3D::RenderLeft(ID3D11RenderTargetView *RenderTarget, D3DXMATRIX matFinal, D3D11_VIEWPORT viewport, bool clearRenderTarget){
devcon->OMSetRenderTargets(1, &RenderTarget, zbuffer);
devcon->RSSetViewports(1, &viewport);
// update shader resources
devcon->UpdateSubresource(pCBufferPrimaryShader, 0, 0, &matFinal, 0, 0);
devcon->PSSetShaderResources(0, 1, &pTextureLeftResourceView);
// clear the depth buffer and render target texture
devcon->ClearDepthStencilView(zbuffer, D3D11_CLEAR_DEPTH, 1.0f, 0);
if (clearRenderTarget){
devcon->ClearRenderTargetView(RenderTarget, D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f));
}
// render to texture on left side (oculus) or full texture
devcon->DrawIndexed(6, 0, 0);
}
This section should render a rectangle with the texture from RenderLeft() to the back buffer.
//Render the scene to the right side of the back buffer
void Direct3D::RenderRight(ID3D11RenderTargetView *RenderTarget, D3DXMATRIX matFinal, D3D11_VIEWPORT viewport, bool clearRenderTarget){
//render to texture
devcon->OMSetRenderTargets(1, &RenderTarget, zbuffer);
devcon->RSSetViewports(1, &viewport);
// update shader resources
devcon->UpdateSubresource(pCBufferPrimaryShader, 0, 0, &matFinal, 0, 0);
devcon->PSSetShaderResources(0, 1, &pRenderTextureLeftResourceView);
// clear the depth buffer and render target texture
devcon->ClearDepthStencilView(zbuffer, D3D11_CLEAR_DEPTH, 1.0f, 0);
if (clearRenderTarget){
devcon->ClearRenderTargetView(RenderTarget, D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f));
}
// render to texture on left side (oculus) or full texture
devcon->DrawIndexed(6, 0, 0);
}
Finally, the code that creates the various views and viewports
void Direct3D::InitD3D(HWND hWnd)
{
// create a struct to hold information about the swap chain
DXGI_SWAP_CHAIN_DESC scd;
// clear out the struct for use
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC));
// fill the swap chain description struct
scd.BufferCount = 1; // one back buffer
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // use 32-bit color
scd.BufferDesc.Width = screen_width;
scd.BufferDesc.Height = screen_height;
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // how swap chain is to be used
scd.OutputWindow = hWnd; // the window to be used
scd.SampleDesc.Count = 4; // how many multisamples
scd.Windowed = TRUE; // windowed/full-screen mode
scd.Flags = DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH;
// create a device, device context and swap chain using the information in the scd struct
D3D11CreateDeviceAndSwapChain(NULL,
D3D_DRIVER_TYPE_HARDWARE,
NULL,
NULL,
NULL,
NULL,
D3D11_SDK_VERSION,
&scd,
&swapchain,
&dev,
NULL,
&devcon);
// create the depth buffer texture
D3D11_TEXTURE2D_DESC texd;
ZeroMemory(&texd, sizeof(texd));
texd.Width = screen_width;
texd.Height = screen_height;
texd.ArraySize = 1;
texd.MipLevels = 1;
texd.SampleDesc.Count = 4;
texd.Format = DXGI_FORMAT_D32_FLOAT;
texd.BindFlags = D3D11_BIND_DEPTH_STENCIL;
ID3D11Texture2D *pDepthBuffer;
dev->CreateTexture2D(&texd, NULL, &pDepthBuffer);
// create the depth buffer
D3D11_DEPTH_STENCIL_VIEW_DESC dsvd;
ZeroMemory(&dsvd, sizeof(dsvd));
dsvd.Format = DXGI_FORMAT_D32_FLOAT;
dsvd.ViewDimension = D3D11_DSV_DIMENSION_TEXTURE2DMS;
dev->CreateDepthStencilView(pDepthBuffer, &dsvd, &zbuffer);
pDepthBuffer->Release();
// get the address of the back buffer
ID3D11Texture2D *pBackBuffer;
swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&pBackBuffer);
// use the back buffer address to create the render target
dev->CreateRenderTargetView(pBackBuffer, NULL, &backbuffer);
pBackBuffer->Release();
//create intermediate render textures
ID3D11Texture2D *pRenderTextureLeft;
D3D11_TEXTURE2D_DESC textureDesc;
D3D11_RENDER_TARGET_VIEW_DESC renderTargetViewDesc;
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
ZeroMemory(&textureDesc, sizeof(textureDesc));
textureDesc.Width = screen_width;
textureDesc.Height = screen_height;
if (oculus){
textureDesc.Width = (UINT)((FLOAT)textureDesc.Width * oculus->renderScale);
textureDesc.Height = (UINT)((FLOAT)textureDesc.Height *oculus->renderScale);
}
textureDesc.MipLevels = 1;
textureDesc.ArraySize = 1;
textureDesc.Format = DXGI_FORMAT_R32G32B32A32_FLOAT;
textureDesc.SampleDesc.Count = 1;
textureDesc.Usage = D3D11_USAGE_DEFAULT;
textureDesc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE;
textureDesc.CPUAccessFlags = 0;
textureDesc.MiscFlags = 0;
dev->CreateTexture2D(&textureDesc, NULL, &pRenderTextureLeft);
renderTargetViewDesc.Format = textureDesc.Format;
renderTargetViewDesc.ViewDimension = D3D11_RTV_DIMENSION_TEXTURE2D;
renderTargetViewDesc.Texture2D.MipSlice = 0;
dev->CreateRenderTargetView(pRenderTextureLeft, &renderTargetViewDesc, &pTextureLeftRenderView);
shaderResourceViewDesc.Format = textureDesc.Format;
shaderResourceViewDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
dev->CreateShaderResourceView(pRenderTextureLeft, &shaderResourceViewDesc, &pRenderTextureLeftResourceView);
ID3D11Texture2D *pRenderTextureRight;
dev->CreateTexture2D(&textureDesc, NULL, &pRenderTextureRight);
dev->CreateRenderTargetView(pRenderTextureRight, &renderTargetViewDesc, &pTextureRightRenderView);
dev->CreateShaderResourceView(pRenderTextureRight, &shaderResourceViewDesc, &pRenderTextureRightResourceView);
/*if (oculus){
pOculusOutputDevice = oculus->searchForOculusDisplay(oculus->hmd.DisplayDeviceName);
swapchain->SetFullscreenState(TRUE, pOculusOutputDevice);
}*/
// Set the viewport
ZeroMemory(&viewportLeft, sizeof(D3D11_VIEWPORT));
ZeroMemory(&viewportRight, sizeof(D3D11_VIEWPORT));
ZeroMemory(&viewportCenter, sizeof(D3D11_VIEWPORT));
viewportCenter.TopLeftX = 0.0f;
viewportCenter.TopLeftY = 0.0f;
if (oculus){
viewportCenter.Width = (FLOAT)screen_width*oculus->renderScale;
viewportCenter.Height = (FLOAT)screen_height*oculus->renderScale;
}
else{
viewportCenter.Width = (FLOAT)screen_width;
viewportCenter.Height = (FLOAT)screen_height;
}
viewportCenter.MinDepth = 0.0f;
viewportCenter.MaxDepth = 1.0f;
if (dual_mode){
viewportLeft.TopLeftX = 0.0f;
viewportLeft.TopLeftY = 0.0f;
viewportLeft.Width = (FLOAT)screen_width / 2.0f;
viewportLeft.Height = (FLOAT)screen_height;
viewportLeft.MinDepth = 0.0f;
viewportLeft.MaxDepth = 1.0f;
viewportRight.TopLeftX = (FLOAT)screen_width / 2.0f;
viewportRight.TopLeftY = 0.0f;
viewportRight.Width = (FLOAT)screen_width / 2.0f;
viewportRight.Height = (FLOAT)screen_height;
viewportRight.MinDepth = 0.0f;
viewportRight.MaxDepth = 1.0f;
}
devcon->RSSetViewports(1, &viewportCenter);
InitPipeline();
InitGraphics();
}
Per request, here is some more code:
I'm including the entire Direct3D class header, so you can see what are and are not member variables.
#pragma once
#include "Oculus.h"
#include <OVR.h>
#include "Camera.h"
#include <d3d11.h>
#include <D3DX11.h>
#include <D3DX10.h>
#pragma comment (lib, "d3d11.lib")
#pragma comment (lib, "d3dx11.lib")
#pragma comment (lib, "d3dx10.lib")
class Direct3D
{
public:
struct VERTEX{ FLOAT X, Y, Z; D3DXCOLOR Color; FLOAT U, V; };
struct DISTORTION{
FLOAT LensCenter[2];
FLOAT ScreenCenter[2];
FLOAT Scale[2];
FLOAT ScaleIn[2];
FLOAT HmdWarpParam[4];
};
IDXGISwapChain *swapchain; // the pointer to the swap chain interface
ID3D11Device *dev; // the pointer to our Direct3D device interface
ID3D11DeviceContext *devcon; // the pointer to our Direct3D device context
ID3D11RenderTargetView *backbuffer;
IDXGIOutput* pOculusOutputDevice;
ID3D11VertexShader *pVS_Primary; // the vertex shader
ID3D11PixelShader *pPS_Primary; // the pixel shader
ID3D11VertexShader *pVS_Distortion;
ID3D11PixelShader *pPS_Distortion; // the pixel shader
ID3D11Buffer *pVBuffer; // vertex buffer
ID3D11Buffer *pIBuffer;
ID3D11InputLayout *pLayout_Primary;
ID3D11InputLayout *pLayout_Distortion;
D3D11_VIEWPORT viewportLeft;
D3D11_VIEWPORT viewportRight;
D3D11_VIEWPORT viewportCenter;
ID3D11Buffer *pCBufferPrimaryShader;
ID3D11Buffer *pCBufferDistortionShader;
ID3D11DepthStencilView *zbuffer; // the pointer to our depth buffer
ID3D11ShaderResourceView *pTextureLeftResourceView; // the pointer to the texture
ID3D11ShaderResourceView *pTextureRightResourceView;
ID3D11ShaderResourceView *pRenderTextureLeftResourceView;
ID3D11ShaderResourceView *pRenderTextureRightResourceView;
ID3D11RenderTargetView *pTextureLeftRenderView;
ID3D11RenderTargetView *pTextureRightRenderView;
D3DXMATRIX matFinalLeft;
D3DXMATRIX matFinalRight;
Camera cameraLeft, cameraRight;
int screen_width;
int screen_height;
bool dual_mode;
Oculus* oculus;
Direct3D(Oculus* oculus);
Direct3D();
~Direct3D();
void InitD3D(HWND hWnd); // sets up and initializes Direct3D
void CleanD3D(void); // closes Direct3D and releases memory
void RenderFrame();
void InitPipeline();
void InitGraphics();
void RenderLeft(ID3D11RenderTargetView *RenderTarget, D3DXMATRIX matFinal, D3D11_VIEWPORT viewport, bool clearRenderTarget);
void RenderRight(ID3D11RenderTargetView *RenderTarget, D3DXMATRIX matFinal, D3D11_VIEWPORT viewport, bool clearRenderTarget);
void DistortionCorrection(ID3D11RenderTargetView *RenderTarget);
void CreateTransforms();
void setVertices();
void setMainShaders();
void OVRMatrix4fToD3DXMatrix(OVR::Matrix4f& source, D3DXMATRIX& dest);
};
And here is the code that initializes the image textures (right now they load the same image into two different textures; eventually it's going to be the two sides of the 3D image, as soon as I figure out how to access the second image in the file).
FILENAME is #defined as the name of the image file I'm displaying
void Direct3D::InitGraphics()
{
D3DX11CreateShaderResourceViewFromFile(dev, // the Direct3D device
FILENAME, // load Wood.png in the local folder
NULL, // no additional information
NULL, // no multithreading
&pTextureLeftResourceView, // address of the shader-resource-view
NULL); // no multithreading
D3DX11CreateShaderResourceViewFromFile(dev, // the Direct3D device
FILENAME, // load Wood.png in the local folder
NULL, // no additional information
NULL, // no multithreading
&pTextureRightResourceView, // address of the shader-resource-view
NULL); // no multithreading
// get image size for rectangle mesh size
D3DX11_IMAGE_INFO info;
D3DX11GetImageInfoFromFile(FILENAME, NULL, &info, NULL);
FLOAT textureWidth = info.Width*0.001f;
FLOAT textureHeight = info.Height*0.001f;
// create vertices to represent the corners of the cube
VERTEX OurVertices[] =
{
{ -textureWidth, -textureHeight, 2.0f, D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f), 1.0f, 1.0f },
{ textureWidth, -textureHeight, 2.0f, D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f), 0.0f, 1.0f },
{ -textureWidth, textureHeight, 2.0f, D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f), 1.0f, 0.0f },
{ textureWidth, textureHeight, 2.0f, D3DXCOLOR(1.0f, 1.0f, 1.0f, 1.0f), 0.0f, 0.0f }
};
// create the vertex buffer
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(VERTEX)* 4;
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
dev->CreateBuffer(&bd, NULL, &pVBuffer);
// copy the vertices into the buffer
D3D11_MAPPED_SUBRESOURCE ms;
devcon->Map(pVBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, OurVertices, sizeof(OurVertices)); // copy the data
devcon->Unmap(pVBuffer, NULL);
// create the index buffer out of DWORDs
DWORD OurIndices[] =
{
0, 1, 2, // side 1
2, 1, 3,
};
// create the index buffer
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(DWORD)* 6;
bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
bd.MiscFlags = 0;
dev->CreateBuffer(&bd, NULL, &pIBuffer);
devcon->Map(pIBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, OurIndices, sizeof(OurIndices)); // copy the data
devcon->Unmap(pIBuffer, NULL);
}
And just in case you need it, here is the initialization of the rendering pipeline.
void Direct3D::InitPipeline()
{
// compile the shaders
ID3D10Blob *VS_Primary, *PS_Primary, *VS_Distortion, *PS_Distortion;
D3DX11CompileFromFile("vs_primary.hlsl", 0, 0, "VShader", "vs_5_0", 0, 0, 0, &VS_Primary, 0, 0);
D3DX11CompileFromFile("ps_primary.hlsl", 0, 0, "PShader", "ps_5_0", 0, 0, 0, &PS_Primary, 0, 0);
D3DX11CompileFromFile("vs_distortion.hlsl", 0, 0, "VShader", "vs_5_0", 0, 0, 0, &VS_Distortion, 0, 0);
D3DX11CompileFromFile("ps_distortion.hlsl", 0, 0, "main", "ps_5_0", 0, 0, 0, &PS_Distortion, 0, 0);
// create the shader objects
dev->CreateVertexShader(VS_Primary->GetBufferPointer(), VS_Primary->GetBufferSize(), NULL, &pVS_Primary);
dev->CreatePixelShader(PS_Primary->GetBufferPointer(), PS_Primary->GetBufferSize(), NULL, &pPS_Primary);
dev->CreateVertexShader(VS_Distortion->GetBufferPointer(), VS_Distortion->GetBufferSize(), NULL, &pVS_Distortion);
dev->CreatePixelShader(PS_Distortion->GetBufferPointer(), PS_Distortion->GetBufferSize(), NULL, &pPS_Distortion);
// set the shader objects
devcon->VSSetShader(pVS_Primary, 0, 0);
devcon->PSSetShader(pPS_Primary, 0, 0);
// create the input element object
D3D11_INPUT_ELEMENT_DESC ied[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 28, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
// use the input element descriptions to create the input layout
dev->CreateInputLayout(ied, 3, VS_Primary->GetBufferPointer(), VS_Primary->GetBufferSize(), &pLayout_Primary);
devcon->IASetInputLayout(pLayout_Primary);
dev->CreateInputLayout(ied, 3, VS_Distortion->GetBufferPointer(), VS_Distortion->GetBufferSize(), &pLayout_Distortion);
devcon->IASetInputLayout(pLayout_Distortion);
// create the constant buffer
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DEFAULT;
bd.ByteWidth = 64;
bd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
dev->CreateBuffer(&bd, NULL, &pCBufferPrimaryShader);
devcon->VSSetConstantBuffers(0, 1, &pCBufferPrimaryShader);
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DEFAULT;
bd.ByteWidth = 48;
bd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
dev->CreateBuffer(&bd, NULL, &pCBufferDistortionShader);
}
Pixel Shader:
Texture2D Texture;
SamplerState ss;
float4 PShader(float4 color : COLOR, float2 texcoord : TEXCOORD0) : SV_TARGET
{
return color * Texture.Sample(ss, texcoord);
}
Vertex Shader:
cbuffer ConstantBuffer
{
float4x4 matFinal;
}
struct VOut
{
float4 color : COLOR;
float2 texcoord : TEXCOORD0;
float4 position : SV_POSITION;
};
VOut VShader(float4 position : POSITION, float4 color : COLOR, float2 texcoord : TEXCOORD0)
{
VOut output;
output.position = mul(matFinal, position);
output.color = color;
output.texcoord = texcoord;
return output;
}
From the following code, I didn't see how you pass the texture from RenderLeft() to RenderRight(); you just pass backbuffer to RenderRight().
RenderLeft(pTextureLeftRenderView, matFinalLeft, viewportLeft, true);
RenderRight(backbuffer, matFinalRight, viewportRight, false);
So the result is that the texture is rendered to the left viewport, and the right viewport only shows the color (green) of the backbuffer.