DirectX: Drawing a bitmap image scaled up in the viewport gives low quality - C++

I'm using DirectX to draw images from RGB data in a buffer. The following is a summary of the code:
// create the vertex buffer
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC; // write access by the CPU, read access by the GPU
bd.ByteWidth = sizeOfOurVertices; // size is the VERTEX struct * pW*pH
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER; // use as a vertex buffer
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; // allow CPU to write in buffer
dev->CreateBuffer(&bd, NULL, &pVBuffer); // create the buffer
//Create sampler for texture
D3D11_SAMPLER_DESC desc;
desc.Filter = D3D11_FILTER_ANISOTROPIC;
desc.MaxAnisotropy = 16;
ID3D11SamplerState *ppSamplerState = NULL;
dev->CreateSamplerState(&desc, &ppSamplerState);
devcon->PSSetSamplers(0, 1, &ppSamplerState);
//Create list vertices from RGB data buffer
pW = bitmapSource->PixelWidth;
pH = bitmapSource->PixelHeight;
OurVertices = new VERTEX[pW*pH];
vIndex = 0;
unsigned char* curP = rgbPixelsBuff;
for (y = 0; y < pH; y++)
{
    for (x = 0; x < pW; x++)
    {
        OurVertices[vIndex].Color.b = *curP++;
        OurVertices[vIndex].Color.g = *curP++;
        OurVertices[vIndex].Color.r = *curP++;
        OurVertices[vIndex].Color.a = *curP++;
        OurVertices[vIndex].X = x;
        OurVertices[vIndex].Y = y;
        OurVertices[vIndex].Z = 0.0f;
        vIndex++;
    }
}
sizeOfOurVertices = sizeof(VERTEX)* pW*pH;
// copy the vertices into the buffer
D3D11_MAPPED_SUBRESOURCE ms;
devcon->Map(pVBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, OurVertices, sizeOfOurVertices); // copy the data
devcon->Unmap(pVBuffer, NULL); // unmap the buffer
// clear the back buffer to a deep blue
devcon->ClearRenderTargetView(backbuffer, D3DXCOLOR(0.0f, 0.2f, 0.4f, 1.0f));
// select which vertex buffer to display
UINT stride = sizeof(VERTEX);
UINT offset = 0;
devcon->IASetVertexBuffers(0, 1, &pVBuffer, &stride, &offset);
// select which primitive type we are using
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
// draw the vertex buffer to the back buffer
devcon->Draw(pW*pH, 0);
// switch the back buffer and the front buffer
swapchain->Present(0, 0);
When the viewport is smaller than or equal to the image, everything is OK. But when the viewport is larger than the image, the image quality is very bad.
I've searched and tried desc.Filter = D3D11_FILTER_ANISOTROPIC; as in the code above (I've also tried D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR and D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT), but the result is no better. The following images show the result:
Can someone tell me how to fix this?
Many thanks!

You are drawing each pixel as a separate point. When the viewport gets larger than the image, those points simply spread apart and leave gaps, so the quality looks bad; the sampler filter never comes into play because nothing is actually being sampled. You should draw a textured quad instead: upload your RGB data into a texture and sample it in a pixel shader, as in the sketch below.
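A minimal sketch of that approach, assuming the rgbPixelsBuff, pW, pH, dev, devcon and ppSamplerState variables from the question; the QuadVertex struct, the clip-space coordinates and the shader wiring are illustrative, not part of the original code. The pixel shader only needs to Sample the texture, like the PShader shown in the sprite question further down.

// One texture holding the image, plus a single quad that covers the viewport.
struct QuadVertex { float x, y, z; float u, v; };   // hypothetical vertex layout

// 1) Create the texture from the pixel data (b,g,r,a byte order, as in the loop above)
D3D11_TEXTURE2D_DESC td = {};
td.Width = pW;
td.Height = pH;
td.MipLevels = 1;
td.ArraySize = 1;
td.Format = DXGI_FORMAT_B8G8R8A8_UNORM;
td.SampleDesc.Count = 1;
td.Usage = D3D11_USAGE_IMMUTABLE;
td.BindFlags = D3D11_BIND_SHADER_RESOURCE;

D3D11_SUBRESOURCE_DATA init = {};
init.pSysMem = rgbPixelsBuff;
init.SysMemPitch = pW * 4;                          // 4 bytes per pixel

ID3D11Texture2D* tex = NULL;
ID3D11ShaderResourceView* srv = NULL;
dev->CreateTexture2D(&td, &init, &tex);
dev->CreateShaderResourceView(tex, NULL, &srv);

// 2) Four vertices in clip space with texture coordinates
QuadVertex quad[4] =
{
    { -1.0f,  1.0f, 0.0f, 0.0f, 0.0f },   // top-left
    {  1.0f,  1.0f, 0.0f, 1.0f, 0.0f },   // top-right
    { -1.0f, -1.0f, 0.0f, 0.0f, 1.0f },   // bottom-left
    {  1.0f, -1.0f, 0.0f, 1.0f, 1.0f },   // bottom-right
};
// ...create a small vertex buffer from quad[] the same way pVBuffer is created above...

// 3) Draw the quad; now the sampler filter actually matters when scaling up
devcon->PSSetShaderResources(0, 1, &srv);
devcon->PSSetSamplers(0, 1, &ppSamplerState);
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
devcon->Draw(4, 0);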

Related

How do you update the vertex buffer or constant buffer attached to a sprite to move it smoothly across the screen in Direct3D 11?

I have attached a texture to a set of 4 indexed vertices which are stored in a dynamic vertex buffer. I have also added a translation matrix to the constant buffer of the vertex shader. However, when I try updating the constant buffer to alter the translation matrix so that I can move the sprite, the sprite does not move smoothly. It stops randomly for short amounts of time before moving a short distance again.
Below are the render functions, the main loop and the shaders being used:
void Sprite::Render(ID3D11DeviceContext* devcon, float dt) {
    // 2D rendering on backbuffer here
    UINT stride = sizeof(VERTEX);
    UINT offset = 0;
    spr_const_data.translateMatrix.r[3].m128_f32[0] += 60.0f*dt;
    devcon->IASetInputLayout(m_data_layout);
    devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
    devcon->VSSetShader(m_spr_vert_shader, 0, 0);
    devcon->PSSetShader(m_spr_pixel_shader, 0, 0);
    devcon->VSSetConstantBuffers(0, 1, &m_spr_const_buffer);
    devcon->PSSetSamplers(0, 1, &m_tex_sampler_state);
    devcon->PSSetShaderResources(0, 1, &m_shader_resource_view);
    // select vertex and index buffers
    devcon->IASetIndexBuffer(m_sprite_index_buffer, DXGI_FORMAT_R32_UINT, offset);
    devcon->IASetVertexBuffers(0, 1, &m_sprite_vertex_buffer, &stride, &offset);
    // update the constant buffer with the new translation matrix
    D3D11_MAPPED_SUBRESOURCE ms;
    ZeroMemory(&ms, sizeof(D3D11_MAPPED_SUBRESOURCE));
    devcon->Map(m_spr_const_buffer, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms);
    memcpy(ms.pData, &spr_const_data, sizeof(spr_const_data));
    devcon->Unmap(m_spr_const_buffer, 0);
    // draw vertex buffer to backbuffer
    devcon->DrawIndexed(6, 0, 0);
}

void RenderFrame(float dt) {
    float background_color[] = { 1.0f, 1.0f, 1.0f, 1.0f };
    // clear backbuffer
    devcon->ClearRenderTargetView(backbuffer, background_color);
    knight->Render(devcon, dt);
    // switch back and front buffer
    swapchain->Present(0, 0);
}

void MainLoop() {
    MSG msg;
    auto tp1 = std::chrono::system_clock::now();
    auto tp2 = std::chrono::system_clock::now();
    while (GetMessage(&msg, nullptr, 0, 0) > 0) {
        tp2 = std::chrono::system_clock::now();
        std::chrono::duration<float> dt = tp2 - tp1;
        tp1 = tp2;
        TranslateMessage(&msg);
        DispatchMessage(&msg);
        RenderFrame(dt.count());
    }
}
cbuffer CONST_BUFFER_DATA : register(b0)
{
    matrix orthoMatrix;
    matrix translateMatrix;
};

struct VOut {
    float4 position : SV_POSITION;
    float2 tex : TEXCOORD0;
};

VOut VShader(float4 position : POSITION, float2 tex : TEXCOORD0) {
    VOut output;
    position = mul(translateMatrix, position);
    output.position = mul(orthoMatrix, position);
    output.tex = tex;
    return output;
}

Texture2D square_tex;
SamplerState tex_sampler;

float4 PShader(float4 position : SV_POSITION, float2 tex : TEXCOORD0) : SV_TARGET
{
    float4 tex_col = square_tex.Sample(tex_sampler, tex);
    return tex_col;
}
I have also included the initialization of the swap chain and backbuffer in case the mistake lies there.
DXGI_SWAP_CHAIN_DESC scd; // hold swap chain information
ZeroMemory(&scd, sizeof(DXGI_SWAP_CHAIN_DESC));
// fill swap chain description struct
scd.BufferCount = 1; // one back buffer
scd.BufferDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM; // use 32-bit color
scd.BufferUsage = DXGI_USAGE_RENDER_TARGET_OUTPUT; // how swap chain is to be used (draw into back buffer)
scd.OutputWindow = hWnd; // window to be used
scd.SampleDesc.Count = 1; // how many multisamples
scd.Windowed = true; // windowed/full screen
// create device, device context and swap chain using scd
D3D11CreateDeviceAndSwapChain(nullptr, D3D_DRIVER_TYPE_HARDWARE, nullptr, D3D11_CREATE_DEVICE_DEBUG, nullptr, NULL, D3D11_SDK_VERSION, &scd, &swapchain, &dev, nullptr, &devcon);
// get address of back buffer
ID3D11Texture2D* pBackBuffer;
swapchain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&pBackBuffer);
// use back buffer address to create render target
dev->CreateRenderTargetView(pBackBuffer, nullptr, &backbuffer);
pBackBuffer->Release();
// set the render target as the backbuffer
devcon->OMSetRenderTargets(1, &backbuffer, nullptr);
// Set the viewport
D3D11_VIEWPORT viewport;
ZeroMemory(&viewport, sizeof(D3D11_VIEWPORT));
viewport.TopLeftX = 0;
viewport.TopLeftY = 0;
viewport.Width = WINDOW_WIDTH;
viewport.Height = WINDOW_HEIGHT;
devcon->RSSetViewports(1, &viewport); // activates viewport
I have also tried getting a pointer to the vertex data via the pData member of the D3D11_MAPPED_SUBRESOURCE object and then casting it to a VERTEX* to manipulate the data, but the problem persists. I would like to know how to move the sprite smoothly across the window.
I solved the problem by writing a fixed-timestep game loop and passing values interpolated between the previous and current states to the render function. I was also updating the constant buffer incorrectly.
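A minimal sketch of such a loop, assuming the Win32 message pump shown above; Update(), the 1/60 s step and passing the interpolation factor to RenderFrame() are illustrative names and values, not the original code:

#include <chrono>
#include <windows.h>

void MainLoop() {
    using clock = std::chrono::steady_clock;
    const std::chrono::duration<float> step(1.0f / 60.0f);   // fixed simulation step
    std::chrono::duration<float> accumulator(0.0f);
    auto previous = clock::now();

    MSG msg = {};
    bool running = true;
    while (running) {
        // drain pending window messages without blocking the loop
        while (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE)) {
            if (msg.message == WM_QUIT) running = false;
            TranslateMessage(&msg);
            DispatchMessage(&msg);
        }

        auto now = clock::now();
        accumulator += now - previous;
        previous = now;

        // advance the simulation in fixed increments
        while (accumulator >= step) {
            Update(step.count());   // e.g. move the sprite by speed * step
            accumulator -= step;
        }

        // interpolate between previous and current simulation states for rendering
        float alpha = accumulator / step;
        RenderFrame(alpha);         // e.g. pos = prevPos + (currPos - prevPos) * alpha
    }
}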

Jumbled Texture output when using D3D11_MAPPED_SUBRESOURCE and Map/Unmap

I'm trying to change the data in a Texture2D with Map/Unmap and am running into an issue where the texture seems to appear only in the top fifth of the screen and the rest is just random pixels (screenshot for clarification).
Here's some code:
uint32_t* m_colorBuffer; //buffer to fill the texture with random colors
D3D11_SUBRESOURCE_DATA m_textureData;
ID3D11Texture2D* m_texture;
D3D11_MAPPED_SUBRESOURCE mappedResource;
m_deviceContext->Map(m_texture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
uint32_t rowSize = 256 * 4;
BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData);
for (int i = 0; i < 256; i++) {
    memcpy(mappedData, &(m_colorBuffer[i*rowSize]), rowSize);
    mappedData += mappedResource.RowPitch;
}
m_deviceContext->Unmap(m_texture, 0);
Can someone please tell me what I'm doing wrong here?
I'd be happy to supply more code if needed.
Looks like my rowSize is in bytes and I'm using it to index m_colorBuffer, which is a uint32_t*. So the source offset advances four rows per iteration and quickly runs off the end of the buffer, which is why only the top portion of the texture looks right.
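A corrected version of the copy loop (a sketch, assuming the 256x256 R8G8B8A8 texture implied by the original code; texWidth, texHeight and rowBytes are illustrative names): index the source in uint32_t elements and use the byte count only for memcpy, stepping the destination by RowPitch.

const uint32_t texWidth  = 256;
const uint32_t texHeight = 256;
const uint32_t rowBytes  = texWidth * sizeof(uint32_t);   // bytes per source row

BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData);
for (uint32_t y = 0; y < texHeight; y++) {
    // source offset in elements, destination offset in bytes (RowPitch)
    memcpy(mappedData, &m_colorBuffer[y * texWidth], rowBytes);
    mappedData += mappedResource.RowPitch;
}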

DirectX Texture Not Drawing Correctly

I'm trying to render a texture to the screen using DirectX without DirectXTK.
This is the texture that I am trying to render on screen (512x512px):
The texture loads correctly but when it is put on the screen, it comes up like this:
I noticed that the rendered image seems to be the texture split four times in the x-direction and many times in the y-direction. The tiles seem to increase in height as the texture is rendered farther down the screen.
I have two thoughts as to how the texture was rendered incorrectly.
I could have initialized the texture incorrectly.
I could have improperly setup my texture sampler.
Regarding improper texture initialization, here is the code that I used to initialize the texture.
Texture2D & Shader Resource View Creation Code
Load Texture Data
This loads the texture for a PNG file into a vector of unsigned chars and sets the width and height of the texture.
std::vector<unsigned char> fileData;
if (!loadFileToBuffer(fileName, fileData))
return nullptr;
std::vector<unsigned char> imageData;
unsigned long width;
unsigned long height;
decodePNG(imageData, width, height, fileData.data(), fileData.size());
Create Texture Description
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(D3D11_TEXTURE2D_DESC));
texDesc.Width = width;
texDesc.Height = height;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DYNAMIC;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = D3D10_CPU_ACCESS_WRITE;
Assign Texture Subresource Data
D3D11_SUBRESOURCE_DATA texData;
ZeroMemory(&texData, sizeof(D3D11_SUBRESOURCE_DATA));
texData.pSysMem = (void*)imageData.data();
texData.SysMemPitch = sizeof(unsigned char) * width;
//Create DirectX Texture In The Cache
HR(m_pDevice->CreateTexture2D(&texDesc, &texData, &m_textures[fileName]));
Create Shader Resource View for Texture
D3D11_SHADER_RESOURCE_VIEW_DESC srDesc;
ZeroMemory(&srDesc, sizeof(D3D11_SHADER_RESOURCE_VIEW_DESC));
srDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
srDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srDesc.Texture2D.MipLevels = 1;
HR(m_pDevice->CreateShaderResourceView(m_textures[fileName], &srDesc,
&m_resourceViews[fileName]));
return m_resourceViews[fileName];//This return value is used as "texture" in the next line
Use The Texture Resource
m_pDeviceContext->PSSetShaderResources(0, 1, &texture);
I have messed around with the MipLevels and SampleDesc.Quality variables to see if they were changing something about the texture but changing them either made the texture black or did nothing to change it.
I also looked into the SysMemPitch variable and made sure that it aligned with what MSDN describes.
Regarding setting up my sampler incorrectly, here is the code that I used to initialize my sampler.
//Setup Sampler
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.MipLODBias = 0.0f;
samplerDesc.MaxAnisotropy = 1;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
samplerDesc.BorderColor[0] = 1.0f;
samplerDesc.BorderColor[1] = 1.0f;
samplerDesc.BorderColor[2] = 1.0f;
samplerDesc.BorderColor[3] = 1.0f;
samplerDesc.MinLOD = -FLT_MAX;
samplerDesc.MaxLOD = FLT_MAX;
HR(m_pDevice->CreateSamplerState(&samplerDesc, &m_pSamplerState));
//Use the sampler
m_pDeviceContext->PSSetSamplers(0, 1, &m_pSamplerState);
I have tried different AddressU/V/W types to see if the texture was loaded with incorrect width/height and was thus shrunk but changing these did nothing.
My VertexShader passes the texture coordinates through using TEXCOORD0 and my PixelShader uses texture.Sample(samplerState, input.texCoord); to get the color of the pixel.
In summary, I am trying to render a texture but the texture gets tiled and I cannot figure out why. What do I need to change to render just one copy of my texture?
I think you assign the wrong pitch:
texData.SysMemPitch = sizeof(unsigned char) * width;
should be
texData.SysMemPitch = 4 * sizeof(unsigned char) * width;
because each pixel has the DXGI_FORMAT_R8G8B8A8_UNORM format and occupies 4 bytes.
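As a more general sketch (not part of the original answer): compute the pitch as width times the format's bytes per pixel, and if the dynamic texture is later updated through Map(), copy row by row using the RowPitch the driver returns, since it may be larger than the packed row size. The variable names below mirror the question; the loop itself is illustrative.

const UINT bytesPerPixel = 4;                    // DXGI_FORMAT_R8G8B8A8_UNORM
const UINT srcPitch = width * bytesPerPixel;     // tightly packed source rows

D3D11_MAPPED_SUBRESOURCE mapped;
HR(m_pDeviceContext->Map(m_textures[fileName], 0, D3D11_MAP_WRITE_DISCARD, 0, &mapped));
BYTE* dst = reinterpret_cast<BYTE*>(mapped.pData);
const BYTE* src = imageData.data();
for (UINT row = 0; row < height; row++) {
    memcpy(dst, src, srcPitch);                  // copy one row of pixels
    dst += mapped.RowPitch;                      // destination rows may be padded
    src += srcPitch;
}
m_pDeviceContext->Unmap(m_textures[fileName], 0);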

Wrong depth buffer (to texture) output?

For the SSAO effect I have to generate two textures: normals (in view space) and depth.
I decided to use the depth buffer as a texture, following the Microsoft tutorial (the "Reading the Depth-Stencil Buffer as a Texture" chapter).
Unfortunately, after rendering I get no information from the depth buffer (the lower image):
I guess that's not right. What is strange is that the depth buffer itself seems to work (I get the right ordering of faces etc.).
The depth buffer code:
//create depth stencil texture (depth buffer)
D3D11_TEXTURE2D_DESC descDepth;
ZeroMemory(&descDepth, sizeof(descDepth));
descDepth.Width = width;
descDepth.Height = height;
descDepth.MipLevels = 1;
descDepth.ArraySize = 1;
descDepth.Format = DXGI_FORMAT_R24G8_TYPELESS;
descDepth.SampleDesc.Count = antiAliasing.getCount();
descDepth.SampleDesc.Quality = antiAliasing.getQuality();
descDepth.Usage = D3D11_USAGE_DEFAULT;
descDepth.BindFlags = D3D11_BIND_DEPTH_STENCIL | D3D11_BIND_SHADER_RESOURCE;
descDepth.CPUAccessFlags = 0;
descDepth.MiscFlags = 0;
ID3D11Texture2D* depthStencil = NULL;
result = device->CreateTexture2D(&descDepth, NULL, &depthStencil);
ERROR_HANDLE(SUCCEEDED(result), L"Could not create depth stencil texture.", MOD_GRAPHIC);
D3D11_SHADER_RESOURCE_VIEW_DESC shaderResourceViewDesc;
//setup the description of the shader resource view
shaderResourceViewDesc.Format = DXGI_FORMAT_R24_UNORM_X8_TYPELESS;
shaderResourceViewDesc.ViewDimension = antiAliasing.isOn() ? D3D11_SRV_DIMENSION_TEXTURE2DMS : D3D11_SRV_DIMENSION_TEXTURE2D;
shaderResourceViewDesc.Texture2D.MostDetailedMip = 0;
shaderResourceViewDesc.Texture2D.MipLevels = 1;
//create the shader resource view.
ERROR_HANDLE(SUCCEEDED(device->CreateShaderResourceView(depthStencil, &shaderResourceViewDesc, &depthStencilShaderResourceView)),
L"Could not create shader resource view for depth buffer.", MOD_GRAPHIC);
createDepthStencilStates();
//set the depth stencil state.
context->OMSetDepthStencilState(depthStencilState3D, 1);
D3D11_DEPTH_STENCIL_VIEW_DESC depthStencilViewDesc;
// Initialize the depth stencil view.
ZeroMemory(&depthStencilViewDesc, sizeof(depthStencilViewDesc));
// Set up the depth stencil view description.
depthStencilViewDesc.Format = DXGI_FORMAT_D24_UNORM_S8_UINT;
depthStencilViewDesc.ViewDimension = antiAliasing.isOn() ? D3D11_DSV_DIMENSION_TEXTURE2DMS : D3D11_DSV_DIMENSION_TEXTURE2D;
depthStencilViewDesc.Texture2D.MipSlice = 0;
//depthStencilViewDesc.Flags = D3D11_DSV_READ_ONLY_DEPTH;
// Create the depth stencil view.
result = device->CreateDepthStencilView(depthStencil, &depthStencilViewDesc, &depthStencilView);
ERROR_HANDLE(SUCCEEDED(result), L"Could not create depth stencil view.", MOD_GRAPHIC);
After rendering the first pass, I set the depth stencil as a texture resource along with the other render targets (color, normals), appending it to the array:
ID3D11ShaderResourceView ** textures = new ID3D11ShaderResourceView *[targets.size()+1];
for (unsigned i = 0; i < targets.size(); i++) {
    textures[i] = targets[i]->getShaderResourceView();
}
textures[targets.size()] = depthStencilShaderResourceView;
context->PSSetShaderResources(0, targets.size()+1, textures);
Before second pass I call context->OMSetRenderTargets(1, &myRenderTargetView, NULL); to unbind depth buffer (so I can use it as texture).
Then, I render my textures (the render targets from the first pass plus the depth buffer) with a trivial post-process shader, just for debugging purposes (second pass):
Texture2D ColorTexture[3];
SamplerState ObjSamplerState;
float4 main(VS_OUTPUT input) : SV_TARGET0 {
    float4 Color;
    Color = float4(0, 1, 1, 1);
    float2 textureCoordinates = input.textureCoordinates.xy * 2;
    if (input.textureCoordinates.x < 0.5f && input.textureCoordinates.y < 0.5f) {
        Color = ColorTexture[0].Sample(ObjSamplerState, textureCoordinates);
    }
    if (input.textureCoordinates.x > 0.5f && input.textureCoordinates.y < 0.5f) {
        textureCoordinates.x -= 0.5f;
        Color = ColorTexture[1].Sample(ObjSamplerState, textureCoordinates);
    }
    if (input.textureCoordinates.x < 0.5f && input.textureCoordinates.y > 0.5f) { // depth texture
        textureCoordinates.y -= 0.5f;
        Color = ColorTexture[2].Sample(ObjSamplerState, textureCoordinates);
    }
    ...
It works fine for the normals texture. Why doesn't it work for the depth buffer (as a shader resource view)?
As per comments:
The texture was rendered and sampled correctly but the data appeared to be uniformly red due to the data lying between 0.999 and 1.0f.
There are a few things you can do to improve the available depth precision, the simplest of which is to ensure your near and far clip distances are not excessively small/large for the scene you're drawing.
Assuming metres are your unit, a near clip of 0.1 (10 cm) and a far clip of 200 (metres) are much more reasonable than 1 cm and 20 km.
Even so, don't expect to see too many black/dark areas: the non-linear nature of a z-buffer means most of your depth values are shunted up towards 1. If visualisation of the depth buffer is important, simply rescale the data to the normalised 0-1 range before displaying it.
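As a rough illustration (not part of the original answer; the function names and the assumption of a standard left-handed D3D perspective projection writing depth in [0, 1] are mine), the remapping can be a simple rescale of the observed value range, or a conversion of the non-linear depth back to a view-space distance using the near/far clip planes. The same math works on the CPU or inside the debug pixel shader:

// Remap a raw depth value (typically crowded near 1.0) into 0-1 for display.
// minDepth/maxDepth are the observed range, e.g. 0.999f and 1.0f as noted above.
float RescaleDepthForDisplay(float depth, float minDepth, float maxDepth)
{
    float t = (depth - minDepth) / (maxDepth - minDepth);
    return t < 0.0f ? 0.0f : (t > 1.0f ? 1.0f : t);
}

// Alternative: recover the view-space distance from the non-linear depth, then
// normalise it between the near and far planes.
float LinearizeDepth(float depth, float zNear, float zFar)
{
    float viewZ = (zNear * zFar) / (zFar - depth * (zFar - zNear));
    return (viewZ - zNear) / (zFar - zNear);   // 0 at the near plane, 1 at the far plane
}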

DirectX11 Mapping/Unmapping has no effect on rendered geometry

I have a simple rendering program I just made, but it refuses to draw anything but what I set the initial vertices to. Even if I call Map() and Unmap(), the geometry doesn't seem to change. I have a feeling it has to do with Map() and Unmap(), but I'm not sure. Right now, my initial vertex data consists of one triangle. Then I map the vertex buffer with a new set of vertices which consists of two triangles, but they aren't rendered. Only one triangle is rendered even though I pass in 6 for the vertex count in the draw function. Here is my setup code:
VertexData vertexData[] =
{
    { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f },
    { 0.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f },
    { 1.0f, 1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f }
};
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd,sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(VertexData)*256;
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
D3D11_SUBRESOURCE_DATA vertexBufferData;
vertexBufferData.pSysMem = vertexData;
vertexBufferData.SysMemPitch = 0;
vertexBufferData.SysMemSlicePitch = 0;
_device->CreateBuffer(&bd, &vertexBufferData, &_renderBuffer);
Mapping function:
data is a vector containing six VertexData structs, for six vertices
D3D11_MAPPED_SUBRESOURCE ms;
ZeroMemory(&ms,sizeof(D3D11_MAPPED_SUBRESOURCE));
_deviceContext->Map(_renderBuffer,NULL,D3D11_MAP_WRITE_DISCARD,NULL,&ms);
memcpy(ms.pData,&data[0],sizeof(VertexData)*data.size());
_deviceContext->Unmap(_renderBuffer,NULL);
And here is my rendering code:
_deviceContext->ClearRenderTargetView(_backBuffer,D3DXCOLOR(0.0f,0.0f,0.0f,1.0f));
_deviceContext->Draw(6,0);
_swapChain->Present(0,0);
EDIT: Disabled backface culling, but the triangle is still not appearing.
Mapping Function
void Render::CopyDataToBuffers(std::vector<VertexData> data)
{
    D3D11_MAPPED_SUBRESOURCE ms;
    ZeroMemory(&ms, sizeof(D3D11_MAPPED_SUBRESOURCE));
    _deviceContext->Map(_renderBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms);
    memcpy(ms.pData, &data[0], sizeof(VertexData)*data.size());
    _deviceContext->Unmap(_renderBuffer, NULL);
}
Calling the mapping function
std::vector<VertexData> vertexDataVec;
vertexDataVec.push_back(vertexData[0]);
vertexDataVec.push_back(vertexData[1]);
vertexDataVec.push_back(vertexData[2]);
vertexDataVec.push_back(vertexData[3]);
vertexDataVec.push_back(vertexData[4]);
vertexDataVec.push_back(vertexData[5]);
Render::GetRender().CopyDataToBuffers(vertexDataVec);
To fix the problem, I just had to create an ID3D11RasterizerState and disable culling.
Here is the setup:
ID3D11RasterizerState* rasterizerState = NULL;
D3D11_RASTERIZER_DESC rd =
{
    D3D11_FILL_SOLID,  // FillMode
    D3D11_CULL_NONE,   // CullMode: don't cull triangles based on winding order
    TRUE,              // FrontCounterClockwise
    1,                 // DepthBias
    10.0F,             // DepthBiasClamp
    1.0F,              // SlopeScaledDepthBias
    TRUE               // DepthClipEnable
};
_device->CreateRasterizerState(&rd,&rasterizerState);
_deviceContext->RSSetState(rasterizerState);
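An equivalent and perhaps less error-prone way to get the same effect (a sketch, assuming the CD3D11_* helper structs from d3d11.h are available) is to start from the default rasterizer description and override only the cull mode:

CD3D11_RASTERIZER_DESC rd(D3D11_DEFAULT);   // library defaults for every field
rd.CullMode = D3D11_CULL_NONE;              // draw triangles regardless of winding

ID3D11RasterizerState* rasterizerState = NULL;
if (SUCCEEDED(_device->CreateRasterizerState(&rd, &rasterizerState)))
{
    _deviceContext->RSSetState(rasterizerState);
    rasterizerState->Release();             // the context holds its own reference
}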