I'm trying to change the data in a Texture2D with Map/Unmap and am running into an issue where the texture only appears in the top fifth of the screen and the rest is just random pixels (screenshot for clarification).
Here's some code:
uint32_t* m_colorBuffer; // buffer to fill the texture with random colors
D3D11_SUBRESOURCE_DATA m_textureData;
ID3D11Texture2D* m_texture;
D3D11_MAPPED_SUBRESOURCE mappedResource;
m_deviceContext->Map(m_texture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
uint32_t rowSize = 256 * 4;
BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData);
for (int i = 0; i < 256; i++) {
    memcpy(mappedData, &(m_colorBuffer[i*rowSize]), rowSize);
    mappedData += mappedResource.RowPitch;
}
m_deviceContext->Unmap(m_texture, 0);
Can someone please tell me what I'm doing wrong here?
I'd be happy to supply more code if needed.
Looks like my rowSize is in bytes and I'm using it to index m_colorBuffer, which is a uint32_t array, so I'm stepping through the source four times too fast.
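For anyone hitting the same thing, here is a minimal sketch of the corrected loop (assuming a 256x256 texture of 32-bit pixels): rowSize stays the byte count passed to memcpy, but the source buffer is indexed in pixels per row, not bytes.
uint32_t rowPixels = 256;          // pixels per row in m_colorBuffer
uint32_t rowSize = rowPixels * 4;  // bytes per row to copy
D3D11_MAPPED_SUBRESOURCE mappedResource;
m_deviceContext->Map(m_texture, 0, D3D11_MAP_WRITE_DISCARD, 0, &mappedResource);
BYTE* mappedData = reinterpret_cast<BYTE*>(mappedResource.pData);
for (int i = 0; i < 256; i++) {
    // m_colorBuffer is uint32_t*, so advance the source by pixels per row, not bytes
    memcpy(mappedData, &m_colorBuffer[i * rowPixels], rowSize);
    mappedData += mappedResource.RowPitch; // destination rows use the driver's pitch
}
m_deviceContext->Unmap(m_texture, 0);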
I am trying to render to the OpenGL framebuffer via an OpenGL renderbuffer from an OpenCL kernel. The issue is: even though I can (probably) write to the renderbuffer from an OpenCL kernel, the screen stays empty (black).
I am reaching the limit of what I can test in a reasonable amount of time, so I am asking someone with more experience for a tip about what I am missing.
I suspect that I forgot to bind a buffer at the right point, but since I don't know which one or where, this is practically impossible for me to check.
Now for some reduced code, so you don't have to look at all the error checking etc. These are the functions called during the render routine:
void TestBuffer(){
    GLubyte *buffer = (GLubyte *) malloc(1000 * 1000 * 4);
    glReadBuffer(GL_COLOR_ATTACHMENT0);
    error = glGetError();
    if(error != GL_NO_ERROR){
        printf("error with readBuffer, %i\n", error);
    }
    glReadPixels(0, 0, 1000, 1000, GL_RGBA, GL_UNSIGNED_BYTE, (GLvoid *)buffer);
    error = glGetError();
    if(error != GL_NO_ERROR){
        printf("error with readpixels\n");
    }
    for(int i = 0; i < 1000*100; i++){
        if(buffer[i] != 0){
            printf("buffer was not empty # %i: %u\n", i, buffer[i]);
            free(buffer);
            return;
        }
    }
    printf("buffer was empty\n");
    free(buffer);
}
void runShader(){
    glFinish(); // Make sure that OpenGL isn't using our objects
    ret = clEnqueueAcquireGLObjects(command_queue, 1, &cl_renderBuffer, 0, NULL, NULL);

    // Execute the OpenCL kernel
    size_t global_item_size = 1000 * 1000; // Process the entire image
    size_t local_item_size = 1000;         // Divide work items into groups of ScreenWidth
    ret = clEnqueueNDRangeKernel(command_queue, kernel, 1, NULL, &global_item_size, &local_item_size, 0, NULL, NULL);

    ret = clEnqueueReleaseGLObjects(command_queue, 1, &cl_renderBuffer, 0, NULL, NULL);
    clFlush(command_queue);
    clFinish(command_queue);

    // We are going to blit into the window (default framebuffer)
    glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
    glDrawBuffer(GL_BACK); // Use backbuffer as color dst.

    // Read from your FBO
    glBindFramebuffer(GL_READ_FRAMEBUFFER, gl_frameBuffer);
    glReadBuffer(GL_COLOR_ATTACHMENT0); // Use Color Attachment 0 as color src.

    // Copy the color and depth buffer from your FBO to the default framebuffer
    glBlitFramebuffer(0, 0, 1000, 1000, 0, 0, 1000, 1000, GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT, GL_NEAREST);

    TestBuffer();
}
My ideas were:
Blit the contents of the renderbuffer to the screen buffer, in case I messed up binding the new framebuffer object (created earlier) or attaching the renderbuffer (which you can see in the last few lines of the code)
Check if I messed up with the double buffering or something similar: this is the TestBuffer() function
Flush before finishing, just in case
The kernel code is intentionally simple, to see whether the rest actually works (.w should be alpha, which should be opaque so we can see the result; the rest is just a gray rainbow):
#pragma OPENCL EXTENSION all : enable

#define ScreenWidth 1000
#define ScreenHight 1000

const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_NONE | CLK_FILTER_NEAREST;

__kernel void rainbow(__write_only image2d_t asd) {
    int i = get_global_id(0);
    unsigned int x = i % ScreenWidth;
    unsigned int y = i / ScreenHight;

    uint4 pixel; // I wish I could access this as an array
    pixel.x = i;
    pixel.y = i;
    pixel.z = i;
    pixel.w = 255;

    write_imageui(asd, (int2)(x, y), pixel);
}
Some further information:
I am only rendering to GL_COLOR_ATTACHMENT0, since I don't care about the depth or stencil buffer in my use case. This could be an issue though (I didn't even generate buffers for them).
I am compiling for Windows 10
The format of the renderbuffer is RGBA8, but I think the natural format is RGBA24. It used to be just RGBA, as you can see in the TestBuffer routine, but I think this should be fine.
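The renderbuffer/FBO creation and the CL sharing are not shown above; a reduced sketch of that setup (variable names here are placeholders, not my exact code) looks like this:
// Renderbuffer that backs GL_COLOR_ATTACHMENT0 of gl_frameBuffer
glGenRenderbuffers(1, &gl_renderBuffer);
glBindRenderbuffer(GL_RENDERBUFFER, gl_renderBuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA8, 1000, 1000);
glGenFramebuffers(1, &gl_frameBuffer);
glBindFramebuffer(GL_FRAMEBUFFER, gl_frameBuffer);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, gl_renderBuffer);
// Share the renderbuffer with OpenCL (the CL context was created with GL sharing enabled)
cl_renderBuffer = clCreateFromGLRenderbuffer(context, CL_MEM_WRITE_ONLY, gl_renderBuffer, &ret);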
What could cause the screen to stay black/empty?
I'm using DirectX to draw images from RGB data in a buffer. The following is summary code:
// create the vertex buffer
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC; // write access by CPU, read access by GPU
bd.ByteWidth = sizeOfOurVertices; // size is the VERTEX struct * pW*pH
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER; // use as a vertex buffer
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE; // allow CPU to write in buffer
dev->CreateBuffer(&bd, NULL, &pVBuffer); // create the buffer
// Create sampler for the texture
D3D11_SAMPLER_DESC desc;
desc.Filter = D3D11_FILTER_ANISOTROPIC;
desc.MaxAnisotropy = 16;
ID3D11SamplerState *ppSamplerState = NULL;
dev->CreateSamplerState(&desc, &ppSamplerState);
devcon->PSSetSamplers(0, 1, &ppSamplerState);
//Create list vertices from RGB data buffer
pW = bitmapSource->PixelWidth;
pH = bitmapSource->PixelHeight;
OurVertices = new VERTEX[pW*pH];
vIndex = 0;
unsigned char* curP = rgbPixelsBuff;
for (y = 0; y < pH; y++)
{
    for (x = 0; x < pW; x++)
    {
        OurVertices[vIndex].Color.b = *curP++;
        OurVertices[vIndex].Color.g = *curP++;
        OurVertices[vIndex].Color.r = *curP++;
        OurVertices[vIndex].Color.a = *curP++;
        OurVertices[vIndex].X = x;
        OurVertices[vIndex].Y = y;
        OurVertices[vIndex].Z = 0.0f;
        vIndex++;
    }
}
sizeOfOurVertices = sizeof(VERTEX)* pW*pH;
// copy the vertices into the buffer
D3D11_MAPPED_SUBRESOURCE ms;
devcon->Map(pVBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &ms); // map the buffer
memcpy(ms.pData, OurVertices, sizeOfOurVertices); // copy the data
devcon->Unmap(pVBuffer, NULL);
// unmap the buffer
// clear the back buffer to a deep blue
devcon->ClearRenderTargetView(backbuffer, D3DXCOLOR(0.0f, 0.2f, 0.4f, 1.0f));
// select which vertex buffer to display
UINT stride = sizeof(VERTEX);
UINT offset = 0;
devcon->IASetVertexBuffers(0, 1, &pVBuffer, &stride, &offset);
// select which primitive type we are using
devcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_POINTLIST);
// draw the vertex buffer to the back buffer
devcon->Draw(pW*pH, 0);
// switch the back buffer and the front buffer
swapchain->Present(0, 0);
When the viewport's size is smaller than or equal to the image's size, everything is OK. But when the viewport's size is larger than the image's size, the image quality is very bad.
I've searched and tried desc.Filter = D3D11_FILTER_ANISOTROPIC; as in the code above (I've also tried D3D11_FILTER_MIN_POINT_MAG_MIP_LINEAR and D3D11_FILTER_MIN_LINEAR_MAG_MIP_POINT), but the result is no better. The following images show the result:
Can someone tell me how to fix it?
Many thanks!
You are drawing each pixel as a separate point. It is expected that when the viewport gets bigger the points move apart, gaps appear between them, and the quality looks bad. You should draw a textured quad instead: fill a texture with your RGB data and sample it in a pixel shader, for example as sketched below.
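The texture side of that approach could look roughly like this (a sketch only, reusing pW, pH, rgbPixelsBuff, dev and devcon from the question; the quad and shader setup are not shown):
// Create a dynamic texture that the CPU can update each frame
D3D11_TEXTURE2D_DESC td = {};
td.Width = pW;
td.Height = pH;
td.MipLevels = 1;
td.ArraySize = 1;
td.Format = DXGI_FORMAT_B8G8R8A8_UNORM; // matches the b, g, r, a byte order of the source buffer
td.SampleDesc.Count = 1;
td.Usage = D3D11_USAGE_DYNAMIC;
td.BindFlags = D3D11_BIND_SHADER_RESOURCE;
td.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
ID3D11Texture2D* pTexture = nullptr;
dev->CreateTexture2D(&td, nullptr, &pTexture);
ID3D11ShaderResourceView* pSRV = nullptr;
dev->CreateShaderResourceView(pTexture, nullptr, &pSRV);
// Copy the pixel data row by row, respecting the driver's RowPitch
D3D11_MAPPED_SUBRESOURCE ms;
devcon->Map(pTexture, 0, D3D11_MAP_WRITE_DISCARD, 0, &ms);
for (int y = 0; y < pH; y++)
{
    memcpy((BYTE*)ms.pData + y * ms.RowPitch, rgbPixelsBuff + y * pW * 4, pW * 4);
}
devcon->Unmap(pTexture, 0);
devcon->PSSetShaderResources(0, 1, &pSRV);
Then draw a quad (two triangles with texture coordinates running from (0,0) to (1,1)) covering the target area and sample the texture in the pixel shader; the sampler's filtering then handles the scaling instead of leaving gaps between points.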
I'm trying to render a texture to the screen using DirectX without DirectXTK.
This is the texture that I am trying to render on screen (512x512px):
The texture loads correctly but when it is put on the screen, it comes up like this:
I noticed that the rendered image seems to be the texture split four times in the x-direction and many times in the y-direction. The tiles seem to increase in height as the texture is rendered farther down the screen.
I have two thoughts as to how the texture was rendered incorrectly.
I could have initialized the texture incorrectly.
I could have improperly setup my texture sampler.
Regarding improper texture initialization, here is the code that I used to initialize the texture.
Texture2D & Shader Resource View Creation Code
Load Texture Data
This loads the texture for a PNG file into a vector of unsigned chars and sets the width and height of the texture.
std::vector<unsigned char> fileData;
if (!loadFileToBuffer(fileName, fileData))
return nullptr;
std::vector<unsigned char> imageData;
unsigned long width;
unsigned long height;
decodePNG(imageData, width, height, fileData.data(), fileData.size());
Create Texture Description
D3D11_TEXTURE2D_DESC texDesc;
ZeroMemory(&texDesc, sizeof(D3D11_TEXTURE2D_DESC));
texDesc.Width = width;
texDesc.Height = height;
texDesc.MipLevels = 1;
texDesc.ArraySize = 1;
texDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
texDesc.SampleDesc.Count = 1;
texDesc.SampleDesc.Quality = 0;
texDesc.Usage = D3D11_USAGE_DYNAMIC;
texDesc.BindFlags = D3D11_BIND_SHADER_RESOURCE;
texDesc.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
Assign Texture Subresource Data
D3D11_SUBRESOURCE_DATA texData;
ZeroMemory(&texData, sizeof(D3D11_SUBRESOURCE_DATA));
texData.pSysMem = (void*)imageData.data();
texData.SysMemPitch = sizeof(unsigned char) * width;
//Create DirectX Texture In The Cache
HR(m_pDevice->CreateTexture2D(&texDesc, &texData, &m_textures[fileName]));
Create Shader Resource View for Texture
D3D11_SHADER_RESOURCE_VIEW_DESC srDesc;
ZeroMemory(&srDesc, sizeof(D3D11_SHADER_RESOURCE_VIEW_DESC));
srDesc.Format = DXGI_FORMAT_R8G8B8A8_UNORM;
srDesc.ViewDimension = D3D11_SRV_DIMENSION_TEXTURE2D;
srDesc.Texture2D.MipLevels = 1;
HR(m_pDevice->CreateShaderResourceView(m_textures[fileName], &srDesc,
&m_resourceViews[fileName]));
return m_resourceViews[fileName];//This return value is used as "texture" in the next line
Use The Texture Resource
m_pDeviceContext->PSSetShaderResources(0, 1, &texture);
I have messed around with the MipLevels and SampleDesc.Quality variables to see if they changed anything about the texture, but changing them either made the texture black or did nothing.
I also looked into the SysMemPitch variable and made sure that it aligned with MSDN.
Regarding setting up my sampler incorrectly, here is the code that I used to initialize my sampler.
//Setup Sampler
D3D11_SAMPLER_DESC samplerDesc;
ZeroMemory(&samplerDesc, sizeof(D3D11_SAMPLER_DESC));
samplerDesc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
samplerDesc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.AddressW = D3D11_TEXTURE_ADDRESS_CLAMP;
samplerDesc.MipLODBias = 0.0f;
samplerDesc.MaxAnisotropy = 1;
samplerDesc.ComparisonFunc = D3D11_COMPARISON_NEVER;
samplerDesc.BorderColor[0] = 1.0f;
samplerDesc.BorderColor[1] = 1.0f;
samplerDesc.BorderColor[2] = 1.0f;
samplerDesc.BorderColor[3] = 1.0f;
samplerDesc.MinLOD = -FLT_MAX;
samplerDesc.MaxLOD = FLT_MAX;
HR(m_pDevice->CreateSamplerState(&samplerDesc, &m_pSamplerState));
//Use the sampler
m_pDeviceContext->PSSetSamplers(0, 1, &m_pSamplerState);
I have tried different AddressU/V/W types to see if the texture was loaded with an incorrect width/height and was thus shrunk, but changing these did nothing.
My VertexShader passes the texture coordinates through using TEXCOORD0 and my PixelShader uses texture.Sample(samplerState, input.texCoord); to get the color of the pixel.
In summary, I am trying to render a texture but the texture gets tiled, and I am not able to figure out why. What do I need to change to render just one copy of my texture?
I think you assign the wrong pitch:
texData.SysMemPitch = sizeof(unsigned char) * width;
should be
texData.SysMemPitch = 4 * sizeof(unsigned char) * width;
because each pixel has the DXGI_FORMAT_R8G8B8A8_UNORM format and occupies 4 bytes.
I'm trying to create 2 vertex buffers, one that only stores positions and another that only stores colors. This is just an exercise from Frank Luna's book to become familiar with vertex descriptions, input layouts and buffer options. The problem is that I feel I have made all the relevant changes, and although the geometry shows up, the color buffer is not working. To complete the exercise, instead of using the book's Vertex vertices[]={{...},{...},...} from the example code, where each Vertex stores the position and the color, I am using XMFLOAT3 pos[]={x,y,z...} for positions and XMFLOAT4 colors[]={a,b,c,...} for colors. I have modified the vertex description so each element references the correct input slot:
D3D11_INPUT_ELEMENT_DESC vertexDesc[] =
{
    {"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
    {"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, 0, D3D11_INPUT_PER_VERTEX_DATA, 0}
};
I've also changed the vertex buffer on the device to expect two vertex buffers instead of just mBoxVB which previously was a resource based on Vertex vertices[]:
ID3D11Buffer* mBoxVB;
ID3D11Buffer* mBoxVBCol;
ID3D11Buffer* combined[2];
...
combined[0]=mBoxVB;
combined[1]=mBoxVBCol;
...
UINT stride[] = {sizeof(XMFLOAT3), sizeof(XMFLOAT4)};
UINT offset = 0;
md3dImmediateContext->IASetVertexBuffers(0, 2, combined, stride, &offset);
If of any help, this is the buffer creation code:
D3D11_BUFFER_DESC vbd;
vbd.Usage = D3D11_USAGE_IMMUTABLE;
vbd.ByteWidth = sizeof(XMFLOAT3)*8;
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.CPUAccessFlags = 0;
vbd.MiscFlags = 0;
vbd.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA vinitData;
vinitData.pSysMem = Shapes::boxEx1Pos;
HR(md3dDevice->CreateBuffer(&vbd, &vinitData, &mBoxVB));
D3D11_BUFFER_DESC cbd;
vbd.Usage = D3D11_USAGE_IMMUTABLE;
vbd.ByteWidth = sizeof(XMFLOAT4)*8;
vbd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vbd.CPUAccessFlags = 0;
vbd.MiscFlags = 0;
vbd.StructureByteStride = 0;
D3D11_SUBRESOURCE_DATA cinitData;
cinitData.pSysMem = Shapes::boxEx1Col;
HR(md3dDevice->CreateBuffer(&vbd, &cinitData, &mBoxVBCol));
If I use combined[0]=mBoxVBCol instead of combined[0]=mBoxVB, I get a different shape, so I know there is some data there. But I'm not sure why the COLOR element is not getting sent through to the vertex shader; it seems to me that the second slot is being ignored in the operation.
I found the answer to the problem. It was simply that although I was specifying the stride array for both buffers, I was only giving one offset, so once I changed UINT offset=0; to UINT offset[]={0,0}; it worked.
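For completeness, a minimal sketch of the corrected binding (matching the names used above):
UINT stride[] = { sizeof(XMFLOAT3), sizeof(XMFLOAT4) }; // one stride per buffer
UINT offset[] = { 0, 0 };                               // one offset per buffer
md3dImmediateContext->IASetVertexBuffers(0, 2, combined, stride, offset);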
I am uploading an interlaced image to an OpenGL texture using glTexImage2D, which of course uploads the whole image. What I need is to upload only alternate rows, so the first texture gets the odd rows and the second the even rows.
I don't want to create another copy of the Pixel Data on CPU.
You can set GL_UNPACK_ROW_LENGTH to twice the actual row length. This will effectively skip every second row. If the size of your texture is width x height:
glPixelStorei(GL_UNPACK_ROW_LENGTH, 2 * width);
glBindTexture(GL_TEXTURE_2D, tex1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glPixelStorei(GL_UNPACK_SKIP_PIXELS, width);
glBindTexture(GL_TEXTURE_2D, tex2);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
glPixelStorei(GL_UNPACK_SKIP_PIXELS, 0);
Instead of setting GL_UNPACK_SKIP_PIXELS to skip the first row, you can also increment the data pointer accordingly.
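For example (a sketch assuming tightly packed GL_RGBA / GL_UNSIGNED_BYTE data, i.e. 4 bytes per pixel, with GL_UNPACK_ROW_LENGTH still set to 2 * width):
// Same effect as GL_UNPACK_SKIP_PIXELS = width: start reading at the second source row
const GLubyte* secondField = (const GLubyte*)data + width * 4;
glBindTexture(GL_TEXTURE_2D, tex2);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, secondField);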
There is an ancient SGI extension (GL_SGIX_interlace) for transferring interlaced pixel data, but it is probably not supported on your implementation.
An alternative you might consider is memory mapping a Pixel Buffer Object. You can fill this buffer over two passes and then use it as the source of image data in a call to glTexImage2D (...). You essentially do the de-interlacing yourself, but since this is done by mapping a buffer object's memory you are not making an unnecessary copy of the image on the CPU.
Pseudo code showing how to do this:
GLuint deinterlace_pbo;
glGenBuffers (1, &deinterlace_pbo);

// `GL_PIXEL_UNPACK_BUFFER`, when non-zero, is the source of memory for `glTexImage2D`
glBindBuffer (GL_PIXEL_UNPACK_BUFFER, deinterlace_pbo);

// Reserve memory for the de-interlaced image
glBufferData (GL_PIXEL_UNPACK_BUFFER, sizeof (pixel) * interlaced_rows * width * 2,
              NULL, GL_STATIC_DRAW);

// Returns a pointer to the ***GL-managed memory*** where you will write the image
void* pixel_data = glMapBuffer (GL_PIXEL_UNPACK_BUFFER, GL_WRITE_ONLY);

// Odd rows first
for (int i = 0; i < interlaced_rows; i++) {
    for (int j = 0; j < width; j++) {
        // Fill in pixel_data for each pixel in row (i*2+1)
    }
}

// Even rows
for (int i = 0; i < interlaced_rows; i++) {
    for (int j = 0; j < width; j++) {
        // Fill in pixel_data for each pixel in row (i*2)
    }
}

glUnmapBuffer (GL_PIXEL_UNPACK_BUFFER);

// This will read the memory in the buffer bound to `GL_PIXEL_UNPACK_BUFFER`
glTexImage2D (..., NULL);

glBindBuffer (GL_PIXEL_UNPACK_BUFFER, 0);