I would like to draw instances of an .obj file. After I implemented instancing instead of drawing each object with its own draw() function (which worked just fine), the instances are not positioned correctly. Probably the data from the instance buffer is not passed to the shader correctly.
D3DMain.cpp - creating input layout
// Per-instance data streamed through vertex-buffer slot 1.
// BUG FIX: the member had no name (`D3DXVECTOR3` with no declarator), which
// cannot compile. World.cpp assigns `insertInstance.matTrans = (*it).pos;`,
// so the member is a single translation vector named matTrans, matching the
// DXGI_FORMAT_R32G32B32_FLOAT "INSTTRANS" element of the input layout.
struct INSTANCE {
//D3DXMATRIX matTrans; // full per-instance transform (currently disabled)
D3DXVECTOR3 matTrans;  // per-instance translation (x, y, z)
};
/***/
// create the input layout object
// Input layout describing two vertex streams: slot 0 carries per-vertex
// attributes, slot 1 carries one INSTTRANS element per instance (step rate 1).
D3D11_INPUT_ELEMENT_DESC ied[] =
{
//vertex buffer (slot 0, per-vertex data; offsets 0/12/24 match VERTEX)
{"POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0},
//instance buffer (slot 1, advanced once per instance)
{"INSTTRANS", 0, DXGI_FORMAT_R32G32B32_FLOAT, 1, 0, D3D11_INPUT_PER_INSTANCE_DATA, 1},
//{"INSTTRANS", 1, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 1},
//{"INSTTRANS", 2, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 1},
//{"INSTTRANS", 3, DXGI_FORMAT_R32G32B32A32_FLOAT, 1, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_INSTANCE_DATA, 1},
};
// The element count (4) must match the number of active entries above.
// NOTE(review): VS must be the blob of the vertex shader this layout is used
// with — the poster's eventual fix (separate blobs per shader) suggests this
// blob was being reused/overwritten before CreateInputLayout ran.
if (FAILED(d3ddev->CreateInputLayout(ied, 4, VS->GetBufferPointer(), VS->GetBufferSize(), &pLayout))) throw(std::string("Input Layout Creation Error"));
d3ddevcon->IASetInputLayout(pLayout);
World.cpp - setting up instance buffer
std::vector<INSTANCE> instanceBuffer;
INSTANCE insertInstance;
D3DXMATRIX scaleMat, transMat;
D3DXMatrixScaling(&scaleMat, 50.0f, 50.0f, 50.0f);
int i=0;
for (std::list<SINSTANCES>::iterator it = sInstances.begin(); it != sInstances.end(); it++) {
if ((*it).TypeID == typeId) {
//do something
D3DXMatrixTranslation(&transMat, (*it).pos.x, (*it).pos.y, (*it).pos.z);
insertInstance.matTrans = (*it).pos;//scaleMat * transMat;
instanceBuffer.push_back(insertInstance);
i++;
}
}
instanceCount[typeId] = i;
//create new IB
D3D11_BUFFER_DESC instanceBufferDesc;
ZeroMemory(&instanceBufferDesc, sizeof(instanceBufferDesc));
instanceBufferDesc.Usage = D3D11_USAGE_DEFAULT;
instanceBufferDesc.ByteWidth = sizeof(INSTANCE) * i;
instanceBufferDesc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
instanceBufferDesc.CPUAccessFlags = 0;
instanceBufferDesc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA instanceData;
ZeroMemory(&instanceData, sizeof(instanceData));
instanceData.pSysMem = &instanceBuffer[0];
if (FAILED(d3ddev->CreateBuffer(&instanceBufferDesc, &instanceData, &instanceBufferMap[typeId]))) throw(std::string("Failed to Update Instance Buffer"));
OpenDrawObj.cpp - drawing .obj file
// Bind the mesh VB (slot 0) and the per-type instance VB (slot 1) in one
// IASetVertexBuffers call, then draw every material batch of the .obj with
// instancing.
UINT stride[2] = {sizeof(VERTEX), sizeof(INSTANCE)};
UINT offset[2] = {0, 0};
ID3D11Buffer* combinedBuffer[2] = {meshVertBuff, instanceBuffer};
d3ddevcon->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
d3ddevcon->IASetVertexBuffers(0, 2, combinedBuffer, stride, offset);
d3ddevcon->IASetIndexBuffer(meshIndexBuff, DXGI_FORMAT_R32_UINT, 0);
std::map<std::wstring, OBJMATERIAL>::iterator fit;
for (std::vector<DRAWLIST>::iterator it = drawList.begin(); it != drawList.end(); it++) {
fit = objMaterials.find((*it).material);
if (fit != objMaterials.end()) {
// Bind the batch's diffuse texture, if the material has one.
if ((*fit).second.texture != NULL) {
d3ddevcon->PSSetShaderResources(0, 1, &((*fit).second.texture));
}
// NOTE(review): instanceCount here is presumably the per-call count passed
// into draw() (instanceCount[typeId] at the call site, L69) — confirm it is
// a plain UINT in this scope and not the map from World.cpp.
d3ddevcon->DrawIndexedInstanced((*it).indexCount, instanceCount, (*it).startIndex, 0, 0);
}
}
the drawing function (above) is called here: I pass the instance buffer (map(int, ID3D11Buffer*) and the instance numbers)
(*it).second->draw(0.0f, 0.0f, 0.0f, 0, instanceBufferMap[typeId], instanceCount[typeId]);
shader.hlsl
// Vertex-shader input: the input assembler merges slot 0 (per-vertex) and
// slot 1 (per-instance) attributes according to the C++ input layout.
struct VIn
{
float4 position : POSITION;  // fed by R32G32B32_FLOAT; the IA supplies the missing w
float3 normal : NORMAL;
float2 texcoord : TEXCOORD;
//row_major float4x4 instTrans : INSTTRANS; // full per-instance matrix (disabled)
// NOTE(review): declared float4 but the layout supplies only three floats
// (R32G32B32_FLOAT) — the fourth component is defaulted by the IA.
float4 instTrans : INSTTRANS;
uint instanceID : SV_InstanceID;
};
// Vertex shader entry point for the instanced mesh.
// NOTE(review): the snippet is truncated here — the VOut definition and the
// remainder of the function are not shown.
VOut VShader(VIn input)
{
VOut output;
//first: transforming instance
//output.position = mul(input.instTrans, input.position); // full matrix path (disabled)
output.position = input.position;
output.position.xyz *= 50.0; //scale
output.position.z += input.instTrans.z; //apply only z value (debugging: x/y ignored)
float4 transPos = mul(world, output.position); //transform position with world matrix
output.position = mul(view, transPos); //project to screen
The "input.instTrans" value in the last file is incorrect and contains random data.
Do you have any ideas?
So I found the bug — it was in a totally unexpected location...
So here is the code snippet:
// Each shader gets its OWN blob. The bytecode blob a vertex shader was
// compiled into must remain valid and unmodified when it is later handed to
// CreateInputLayout; reusing a single VS/PS blob pair across shaders was the
// cause of the corrupted per-instance data.
ID3D10Blob *VS, *VS2, *PS, *PS2; //<- i only used VS and PS before
//volume shader
if (FAILED(D3DX11CompileFromFile(L"resources/volume.hlsl", 0, 0, "VShader", "vs_5_0", D3D10_SHADER_PREFER_FLOW_CONTROL | D3D10_SHADER_SKIP_OPTIMIZATION, 0, 0, &VS, 0, 0))) throw(std::string("Volume Shader Error 1"));
if (FAILED(D3DX11CompileFromFile(L"resources/volume.hlsl", 0, 0, "PShader", "ps_5_0", D3D10_SHADER_PREFER_FLOW_CONTROL | D3D10_SHADER_SKIP_OPTIMIZATION, 0, 0, &PS, 0, 0))) throw(std::string("Volume Shader Error 2"));
// encapsulate both shaders into shader objects
if (FAILED(d3ddev->CreateVertexShader(VS->GetBufferPointer(), VS->GetBufferSize(), NULL, &pvolumeVS))) throw(std::string("Volume Shader Error 1A"));
if (FAILED(d3ddev->CreatePixelShader(PS->GetBufferPointer(), PS->GetBufferSize(), NULL, &pvolumePS))) throw(std::string("Volume Shader Error 2A"));
//sky shader (separate blobs VS2/PS2 so VS/PS stay intact)
if (FAILED(D3DX11CompileFromFile(L"resources/sky.hlsl", 0, 0, "VShader", "vs_5_0", D3D10_SHADER_OPTIMIZATION_LEVEL3, 0, 0, &VS2, 0, 0))) throw(std::string("Sky Shader Error 1"));
if (FAILED(D3DX11CompileFromFile(L"resources/sky.hlsl", 0, 0, "PShader", "ps_5_0", D3D10_SHADER_OPTIMIZATION_LEVEL3, 0, 0, &PS2, 0, 0))) throw(std::string("Sky Shader Error 2"));
// encapsulate both shaders into shader objects
if (FAILED(d3ddev->CreateVertexShader(VS2->GetBufferPointer(), VS2->GetBufferSize(), NULL, &pskyVS))) throw(std::string("Sky Shader Error 1A"));
if (FAILED(d3ddev->CreatePixelShader(PS2->GetBufferPointer(), PS2->GetBufferSize(), NULL, &pskyPS))) throw(std::string("Sky Shader Error 2A"));
Using separate blob variables for each compiled shader solved the problem — presumably because the input layout must be created from the bytecode blob of the shader it is used with, and reusing one blob meant it had already been overwritten by a later compile. Thank you for the support, though ;)
Related
With a cube defined as in the following code, you see that normals are often negative in one axis (even if we calculate them).
OpenGL manages it with its fixed pipeline, correct me if I'm wrong, but with programmable pipeline, it causes artifacts like black faces. (My previous stackoverflow question provides code.)
I managed to run my code with an operation on my normals (normal = (0.5 + 0.5 * normal); ), but even if the result looks ok, I wonder if my normals are still valid? (And is this operation the best?)
I mean, from a shader point of view, can I still use them to shade or brighten my models? How do you usually do?
The mentionned normals:
// Unit cube as 12 triangles (36 vertices, 3 floats each), grouped two
// triangles per face; winding is consistent per face.
const GLfloat cube_vertices[] = {
1, 1, 1, -1, 1, 1, -1,-1, 1, // v0-v1-v2 (front)
-1,-1, 1, 1,-1, 1, 1, 1, 1, // v2-v3-v0
1, 1, 1, 1,-1, 1, 1,-1,-1, // v0-v3-v4 (right)
1,-1,-1, 1, 1,-1, 1, 1, 1, // v4-v5-v0
1, 1, 1, 1, 1,-1, -1, 1,-1, // v0-v5-v6 (top)
-1, 1,-1, -1, 1, 1, 1, 1, 1, // v6-v1-v0
-1, 1, 1, -1, 1,-1, -1,-1,-1, // v1-v6-v7 (left)
-1,-1,-1, -1,-1, 1, -1, 1, 1, // v7-v2-v1
-1,-1,-1, 1,-1,-1, 1,-1, 1, // v7-v4-v3 (bottom)
1,-1, 1, -1,-1, 1, -1,-1,-1, // v3-v2-v7
1,-1,-1, -1,-1,-1, -1, 1,-1, // v4-v7-v6 (back)
-1, 1,-1, 1, 1,-1, 1,-1,-1 }; // v6-v5-v4
// Per-vertex face normals matching cube_vertices above: each face's three
// vertices share the same outward-pointing unit normal. Negative components
// (left/bottom/back faces) are correct and expected.
const GLfloat cube_normalsI[] = {
0, 0, 1, 0, 0, 1, 0, 0, 1, // v0-v1-v2 (front)
0, 0, 1, 0, 0, 1, 0, 0, 1, // v2-v3-v0
1, 0, 0, 1, 0, 0, 1, 0, 0, // v0-v3-v4 (right)
1, 0, 0, 1, 0, 0, 1, 0, 0, // v4-v5-v0
0, 1, 0, 0, 1, 0, 0, 1, 0, // v0-v5-v6 (top)
0, 1, 0, 0, 1, 0, 0, 1, 0, // v6-v1-v0
-1, 0, 0, -1, 0, 0, -1, 0, 0, // v1-v6-v7 (left)
-1, 0, 0, -1, 0, 0, -1, 0, 0, // v7-v2-v1
0,-1, 0, 0,-1, 0, 0,-1, 0, // v7-v4-v3 (bottom)
0,-1, 0, 0,-1, 0, 0,-1, 0, // v3-v2-v7
0, 0,-1, 0, 0,-1, 0, 0,-1, // v4-v7-v6 (back)
0, 0,-1, 0, 0,-1, 0, 0,-1 }; // v6-v5-v4
No, this makes no sense at all. Either you need to update your question or you got it all wrong.
A normal may face any direction, and normals being negative in one axis is completely natural. Why wouldn't they be? From what you are describing, you seem to be working with lighting. Part of lighting uses the normal to determine the angle between the light source and the surface. The idea here is that as you turn the normal away, a light ray effectively lights a larger part of the surface, which reduces the density of the reflected light. With basic math you can see that the correlation is cos(angle), so parallel vectors produce the highest brightness. Since we are using vectors, we are better off replacing cos with the dot product.
So at some point you have
float factor = dot(normalize(normal), normalize(lightSource-surfacePoint))
Let's have 2 examples here:
normal = (0, 1, 0)
lightSource = (0, 1, 0)
surfacePoint = (0, 0, 0)
dot((0, 1, 0), (0, 1, 0)) = 0+1+0 = 1
and turn it around:
normal = (-1, 0, 0)
lightSource = (-3, 1, 0)
surfacePoint = (0, 1, 0)
dot((-1, 0, 0), normalize(-3, 0, 0)) = dot((-1, 0, 0), (-1, 0, 0)) = 1+0+0 = 1
so even if positions are completely changed and normals negative we will get the same result for same angles (in these cases the vectors being perpendicular).
The only question here is what to do when dot product is negative. That happens when normal faces away from the light. In your case you have a cube and all normals point outwards. What if you needed to be inside a cube and still have lighting? You will get
normal = (0, 1, 0)
lightSource = (0, 0, 0)
surfacePoint = (0, 1, 0)
dot((0, 1, 0), (0, -1, 0)) = 0-1+0 = -1
Because of such cases you need to either clamp the values or use absolute values. Clamping will cause the interior of the cube to be black (not lighted), while the absolute value will light it as well:
fragmentColor += lightColor*dotFactor // Do nothing and your light will darken the area
fragmentColor += lightColor*abs(dotFactor) // Use absolute value to lighten even if facing away
fragmentColor += lightColor*max(0.0, dotFactor) // Clamp minimum so there are no negative values.
But none of these has anything to do with normals facing a particular direction in an absolute coordinate system. It only has to do with the relative positions of the normal, the pixel location, and the light source.
Okay so loading in a model works fine but when I try to add a texture or lighting they become stretched, I think it is having problems sharing vertices. I have tried changing the sampler_desc address as well as messing around with the input layout but no luck. The results
// Loads the texture at `filename` into m_pTexture0 and creates the matching
// trilinear, wrap-addressing sampler state in m_pSampler0.
// Returns S_OK on success or the failing HRESULT.
HRESULT Model::CreateTextureSampler(char* filename) {
	hr = D3DX11CreateShaderResourceViewFromFile(m_pD3DDevice, filename,
		NULL, NULL,
		&m_pTexture0, NULL);
	if (FAILED(hr))
		return hr;
	// ZeroMemory leaves the remaining fields (MaxAnisotropy, ComparisonFunc,
	// BorderColor, MipLODBias) at 0, which is valid for a non-comparison sampler.
	D3D11_SAMPLER_DESC sampler_desc;
	ZeroMemory(&sampler_desc, sizeof(sampler_desc));
	sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR;
	sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_WRAP;
	sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_WRAP;
	sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP;
	sampler_desc.MinLOD = 0;
	sampler_desc.MaxLOD = D3D11_FLOAT32_MAX;
	hr = m_pD3DDevice->CreateSamplerState(&sampler_desc, &m_pSampler0);
	if (FAILED(hr))
		return hr;
	// BUG FIX: the original fell off the end of a non-void function on the
	// success path, which is undefined behaviour; hr is S_OK here.
	return hr;
}
// Creates the input layout from the compiled vertex-shader blob (M_VS),
// binds it, and sets the triangle-list topology.
HRESULT Model::CreateInputLayout()
{
// Create and set the input layout object
D3D11_INPUT_ELEMENT_DESC m_iedesc[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0,0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
// NOTE(review): R32G32B32A32 makes TEXCOORD a 4-float element. If the CPU
// vertex struct stores a 2-float UV (the usual case for simple textures),
// this misaligns every following attribute and stretches the texture —
// change to DXGI_FORMAT_R32G32_FLOAT to match the vertex data.
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0,DXGI_FORMAT_R32G32B32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 }
};
hr = m_pD3DDevice->CreateInputLayout(m_iedesc, 4, M_VS->GetBufferPointer(),
M_VS->GetBufferSize(), &m_pInputLayout);
if (FAILED(hr)) // Dont fail if error is just a warning
return hr;
m_pImmediateContext->IASetInputLayout(m_pInputLayout);
m_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
return S_OK;
}
When I'm looking at your line of code and I see this line here:
"TEXCOORD", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, D3D11_APPEND_ALIGNED_ELEMENT, D3D11_INPUT_PER_VERTEX_DATA, 0 },
You are passing to the input element descriptor for the tex coord with a third parameter of DXGI_FORMAT_R32G32B32A32_FLOAT. You might want to check this flag to see if this is causing your problem.
Normally texture coords are a vec2 when using simple textures, unless you are using a 3D texture.
You might want to change this to DXGI_FORMAT_R32G32_FLOAT, and give the appropriate [S,T] or [P,Q] vertices for the texture file. Textures are normally images stored in memory linearly but are representing a MxN structure.
I'm trying to render a fullscreen tessellated mesh. It works as expected for up to around 250x250 vertices, but past that, it cuts off after the top half or so (depending on how tessellated I set the mesh).
I have a vertex and index vector, which I populate programmatically as a trianglelist. I suspect it might be related to the buffer size of the vertex buffer, since the number of vertices/indices seems right, but when I try to increase the bytewidth allocation, I get a memory access violation.
The vertex data struct defined as such:
// CPU-side vertex: 12-byte position + 8-byte UV, matching the input layout
// offsets 0 (POSITION, R32G32B32) and 12 (TEXCOORD, R32G32) below.
struct SimpleVertex
{
XMFLOAT3 Pos;
XMFLOAT2 Tex;
};
To initialize the buffers, I use:
// Shared buffer description reused for both the vertex and index buffer.
// NOTE(review): no ZeroMemory(&bd, ...) is visible — confirm the elided code
// initialises MiscFlags and StructureByteStride, otherwise they hold garbage.
D3D11_BUFFER_DESC bd;
bd.Usage = D3D11_USAGE_DEFAULT;
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = 0;
...
D3D11_SUBRESOURCE_DATA InitData;
InitData.pSysMem = &(meshVertices[0]);
...
D3D11_INPUT_ELEMENT_DESC meshLayout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
Vertex buffer:
...
bd.ByteWidth = sizeof(SimpleVertex) * (UINT)(meshVertices.size());
...
hr = g_pd3dDevice->CreateBuffer(&bd, &InitData, &g_pMeshVertexBuffer);
Index buffer:
...
// NOTE(review): WORD (16-bit) indices can only address vertices 0..65535.
// A mesh past ~256x256 vertices overflows them, which matches the "cuts off
// past 250x250" symptom — switch the index data to DWORD (and the
// IASetIndexBuffer format to DXGI_FORMAT_R32_UINT) to fix.
bd.ByteWidth = sizeof(WORD) * (UINT)(meshIndices.size());
...
hr = g_pd3dDevice->CreateBuffer(&bd, &InitData, &g_pMeshIndexBuffer);
Draw call:
// Draws the tessellated fullscreen mesh with the currently bound mesh
// shaders and the linear sampler.
void RenderMesh(){
g_pImmediateContext->OMSetRenderTargets(1, &g_pRenderTargetView, g_pDepthStencilView);
UINT stride = sizeof(SimpleVertex);
UINT offset = 0;
g_pImmediateContext->IASetVertexBuffers(0, 1, &g_pMeshVertexBuffer, &stride, &offset);
g_pImmediateContext->IASetInputLayout(g_pMeshVertexLayout);
// NOTE(review): R16_UINT limits index values to 65535 — the likely cause of
// the mesh being cut off at high tessellation. Use 32-bit index data and
// DXGI_FORMAT_R32_UINT here (both sides must change together).
g_pImmediateContext->IASetIndexBuffer(g_pMeshIndexBuffer, DXGI_FORMAT_R16_UINT, 0);
g_pImmediateContext->VSSetShader(g_pMeshVertexShader, nullptr, 0);
g_pImmediateContext->PSSetShader(g_pMeshPixelShader, nullptr, 0);
g_pImmediateContext->PSSetSamplers(0, 1, &g_pSamplerLinear);
g_pImmediateContext->DrawIndexed(numMeshIndices, 0, 0);
}
Any help would be greatly appreciated.
I have the following uniform buffer:
// Uniform block mapping each cube-edge id (0..11) to its start corner,
// declared as a flat scalar array (3 uints per edge).
// NOTE(review): the effective stride depends on the block's layout
// qualifier; std140 rounds scalar array elements up to 16 bytes.
uniform EDGE_ID_TO_START_POS{
uint[12*3] pos;
} edgeIdToStartPos;
Writing to this buffer works perfectly fine:
// CPU-side table: 12 edges x 3 uints, tightly packed (no per-element padding).
#define EDGE_ID_TO_START_POS_SIZE (12*3)
const uint32_t edgeIdToStartPos_constBuffer[EDGE_ID_TO_START_POS_SIZE] = {
/* 0*/ 0, 0, 0, /* 1*/ 0, 1, 0, /* 2*/ 1, 0, 0, /* 3*/ 0, 0, 0,
/* 4*/ 0, 0, 1, /* 5*/ 0, 1, 1, /* 6*/ 1, 0, 1, /* 7*/ 0, 0, 1,
/* 8*/ 0, 0, 0, /* 9*/ 0, 1, 0, /*10*/ 1, 1, 0, /*11*/ 1, 0, 0
};
// Upload the whole table into the UBO in one call.
glBindBuffer(GL_UNIFORM_BUFFER, ubo);
glBufferData(GL_UNIFORM_BUFFER, EDGE_ID_TO_START_POS_SIZE * sizeof(uint32_t), edgeIdToStartPos_constBuffer, GL_STATIC_DRAW);
glBindBuffer(GL_UNIFORM_BUFFER, 0);
Now I want to change the uniform buffer to the following structure:
// Same table viewed as one uvec3 per edge.
// NOTE(review): under std140 a uvec3 array element has 16-byte base
// alignment, so this declaration does NOT match a tightly packed 3-uint
// upload — each element needs a fourth padding word.
uniform EDGE_ID_TO_START_POS{
uvec3[12] pos;
} edgeIdToStartPos;
This structure makes more sense and is easier to use. And it should have the same memory layout as the previous version (?).
However, when reading from this struct, only the vectors 0..3 contain data. All uvecs with an index of 4 or higher contain zeros.
Why does this happen and how can I initialise such a uniform buffer correctly?
(C/C++, glew, glfw)
A uvec3 has an alignment of 16 bytes, so you must add padding. See OpenGL 4.5 spec, section 7.6.22:
If the member is a three-component vector with components consuming N basic machine units, the base alignment is 4N.
Here is how it would look:
// Lookup table for the EDGE_ID_TO_START_POS uniform block, padded for
// std140: a uvec3 array element has a 16-byte stride (base alignment 4N for
// a three-component vector), so every edge's start corner is stored as
// x, y, z plus one unused padding word.
static const int EDGE_ID_TO_START_POS_SIZE = 12 * 4;
const uint32_t edgeIdToStartPos_constBuffer[EDGE_ID_TO_START_POS_SIZE] = {
    0, 0, 0, 0,  // edge  0
    0, 1, 0, 0,  // edge  1
    1, 0, 0, 0,  // edge  2
    0, 0, 0, 0,  // edge  3
    0, 0, 1, 0,  // edge  4
    0, 1, 1, 0,  // edge  5
    1, 0, 1, 0,  // edge  6
    0, 0, 1, 0,  // edge  7
    0, 0, 0, 0,  // edge  8
    0, 1, 0, 0,  // edge  9
    1, 1, 0, 0,  // edge 10
    1, 0, 0, 0,  // edge 11
};
I have recently been working on shifting over to handling my matrix controls myself, so that my engine will be ready to shift over to a 4.0+ context, though currently I am using a 2.1 context with Derelict. I have tried all kinds of matrix calcs and nothing seems to work with my shader(nothing on screen except for FPS counter unless I switch back to fixed-pipe, or if I switch the vs to set gl_Position = vec4(position,1.0);). Even when I grab the values that OpenGL normally sets and put them into it I get the same.
I added print out of both my matrix and the one that the fixed-pipeline returned to the draw calls and the output is IDENTICAL. I've already checked and double checked my shader code, so I'm pretty lost as to from where this bug is coming.
This happens on both OSX and Windows. I have a glGetError() check every draw call and am not getting any errors. Shaders link and validate with out warning or error.
code for uniform loading:
// Upload both matrices; GL_FALSE means the arrays are already column-major
// (the debug dumps below show translation in elements 12..14, which matches).
glUniformMatrix4fv(ModelViewLoc,1, GL_FALSE ,modelview);
glUniformMatrix4fv(ProjectionLoc,1, GL_FALSE ,projection);
Vertex Shader:
#version 120
// Minimal MVP vertex shader for a GL 2.1 context.
//layout(location = 0) in vec3 position; // GLSL 3.30+ form (disabled for 2.1)
attribute vec3 position;
uniform mat4 ModelView;   // column-major (uploaded with transpose = GL_FALSE)
uniform mat4 Projection;
void main()
{
// Projection applied last: clip = P * MV * v.
mat4 mvp = Projection*ModelView;
gl_Position = mvp * vec4(position,1.0);
//gl_Position = vec4(position,1.0); // debug passthrough: renders, so the bug is upstream of rasterisation
}
Fragment Shader:
#version 120
// Flat-shades every fragment with a grayscale value driven by the
// application-controlled "slider" uniform (0 = black, 1 = white).
uniform float slider;
void main()
{
vec4 diffuse = vec4(vec3(slider),1.0);
gl_FragColor = diffuse;
}
debug output:
Model 1:
OpenGL ModelView: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, -2, -2, -8, 1]
mine: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, -2, -2, -8, 1]
OpenGL Projection:[1.30323, 0, 0, 0, 0, 1.30323, 0, 0, 0, 0, -1.0002, -1, 0, 0, -0.20002, 0]
mine: [1.30323, 0, 0, 0, 0, 1.30323, 0, 0, 0, 0, -1.0002, -1, 0, 0, -0.20002, 0]
Model 2:
OpenGL ModelView: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 2, 2, -8, 1]
mine: [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 2, 2, -8, 1]
OpenGL Projection:[1.30323, 0, 0, 0, 0, 1.30323, 0, 0, 0, 0, -1.0002, -1, 0, 0, -0.20002, 0]
mine: [1.30323, 0, 0, 0, 0, 1.30323, 0, 0, 0, 0, -1.0002, -1, 0, 0, -0.20002, 0]
*UPDATE*I have released the code for matrix lib # github.com/mclark4386/DMath See anything there?^^;