I am learning DirectX and trying to make sure I understand what the functions are doing before I move on to creating an index buffer (which is why I am using repeated vertices instead of indices). I am trying to render a square, but only one triangle displays. I am sure it is my misunderstanding of the winding order or of how the values are offset when passed to the shader, but I can't find the issue. Below is the current relevant code and the result at runtime.
void RenderFrame(void) {
// Clear BackBuffer to a color
devContext->ClearRenderTargetView(backbuffer, D3DXCOLOR(0.0f, 0.2f, 0.4f, 1.0f));
// select which vertex buffer to display
UINT stride = sizeof(Vertex);
UINT offset = 0;
devContext->IASetVertexBuffers(0, 1, &pVBuffer, &stride, &offset);
// select which primitive type we are using
devContext->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP);
// draw the vertex buffer to the back buffer
devContext->Draw(3, 0);
// swap buffers
swapchain->Present(0, 0);
}
void ParseGraphics() {
// Create a square (two triangles) with the Vertex struct
Vertex square[] =
{
{ 0.2f, 0.5f, 0.5f, D3DXCOLOR(0.0f, 0.0f, 1.0f, 1.0f) }, //top-right
{ 0.2f, -0.5f, 0.5f, D3DXCOLOR(0.0f, 0.0f, 0.0f, 1.0f) }, //bottom-right
{ -0.2f, -0.5f, 0.5f, D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f) }, //bottom-left
{ 0.2f, -0.5f, 0.5f, D3DXCOLOR(0.0f, 0.0f, 0.0f, 1.0f) }, //bottom-right
{ -0.2f, -0.5f, 0.5f, D3DXCOLOR(1.0f, 0.0f, 0.0f, 1.0f) }, //bottom-left
{ -0.2f, 0.5f, 0.5f, D3DXCOLOR(0.0f, 1.0f, 0.0f, 1.0f) }, //top-left
};
// Create Vertex Buffer
D3D11_BUFFER_DESC bd;
ZeroMemory(&bd, sizeof(bd));
bd.Usage = D3D11_USAGE_DYNAMIC;
bd.ByteWidth = sizeof(Vertex) * 6;
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
dev->CreateBuffer(&bd, NULL, &pVBuffer);
// copy vertices into buffer
D3D11_MAPPED_SUBRESOURCE msr;
devContext->Map(pVBuffer, NULL, D3D11_MAP_WRITE_DISCARD, NULL, &msr);
memcpy(msr.pData, square, sizeof(square));
devContext->Unmap(pVBuffer, NULL);
}
void BuildPipeline() {
// Load and Compile Shaders
ID3D10Blob *VS, *PS;
D3DX11CompileFromFile(L"shaders.shader", 0, 0, "VShader", "vs_5_0", 0, 0, 0, &VS, 0, 0);
D3DX11CompileFromFile(L"shaders.shader", 0, 0, "PShader", "ps_5_0", 0, 0, 0, &PS, 0, 0);
// Create shaders from the data in the Blobs Buffer
dev->CreateVertexShader(VS->GetBufferPointer(), VS->GetBufferSize(), NULL, &pVS);
dev->CreatePixelShader(PS->GetBufferPointer(), PS->GetBufferSize(), NULL, &pPS);
// Apply Shaders to the device context
devContext->VSSetShader(pVS, 0, 0);
devContext->PSSetShader(pPS, 0, 0);
// Define the layout of the input given to the shaders
D3D11_INPUT_ELEMENT_DESC ied[] =
{
{"POSITION", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0},
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0},
};
dev->CreateInputLayout(ied, 2, VS->GetBufferPointer(), VS->GetBufferSize(), &pLayout);
devContext->IASetInputLayout(pLayout);
}
{"POSITION", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0}
should be DXGI_FORMAT_R32G32B32_FLOAT, as the position has only three components. The AlignedByteOffset of the following COLOR element can then be set to 12 (3 x 4 bytes).
For the topology, you specify D3D10_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP, so the vertex data is expected to form a triangle strip. However, the data you provide specifies two separate triangles (i.e. a triangle list), so you should probably use
devContext->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
The essential mistake seems to be in the Draw() call: you specify that you want to draw a total of 3 vertices, while you actually want to draw all 6 vertices from your buffer. I.e.
devContext->Draw(6, 0);
The byte offset of your Color element description is wrong, too. The position takes 3 single precision floats (3*4 = 12 bytes), and is directly followed by the color data.
{"COLOR", 0, DXGI_FORMAT_R32G32B32A32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0},
I'm rendering this cube and it should show the front of the cube, but instead it shows the back (green color). How do I solve this? I've been sitting for a couple of hours trying to fix this but nothing has helped. I tried various things, like changing the order in which the triangles are rendered, and that didn't help either. Thanks for any help. Here's my code.
float vertices[] =
{
//front
-0.5f, -0.5f, 0.0f, 1.f, 0.0f, 0.5f,
0.5f, -0.5f, 0.0f, 1.f, 0.0f, 0.5f,
0.5f, 0.5f, 0.0f, 1.f, 0.0f, 0.5f,
-0.5f, 0.5f, 0.0f, 1.f, 0.0f, 0.5f,
//back
-0.5f/2, -0.5f/2, -0.5f, 0.0f, 1.f, 0.0f,
0.5f/2, -0.5f/2, -0.5f, 0.0f, 1.f, 0.0f,
0.5f/2, 0.5f/2, -0.5f, 0.0f, 1.f, 0.0f,
-0.5f/2, 0.5f/2, -0.5f, 0.0f, 1.f, 0.0f,
};
unsigned int indices[] =
{
//front
0, 2, 3,
0, 1, 2,
//back
4, 6, 7,
4, 5, 6,
//top
3, 6, 2,
3, 7, 6,
//bottom
0, 1, 5,
0, 5, 4,
//left
3, 0, 4,
3, 4, 7,
//right
1, 2, 5,
2, 6, 5
};
int main()
{
if (!glfwInit())
{
std::cout << "ERROR" << std::endl;
return -1;
}
int width = 640;
int height = 480;
window = glfwCreateWindow(width, height, "OPENGL", NULL, NULL);
if (!window)
{
std::cout << "ERROR: WINDOW" << std::endl;
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_VERSION_MINOR, 6);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
if (glewInit() != GLEW_OK)
{
std::cout << "ERROR: GLEW" << std::endl;
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
std::cout << "OpenGL " << glGetString(GL_VERSION) << std::endl;
VertexArray va1;
VertexBuffer vb1(vertices, sizeof(vertices), GL_STATIC_DRAW);
IndexBuffer ib1(indices, sizeof(indices), GL_STATIC_DRAW);
va1.linkAttrib(vb1, 0, 3, GL_FLOAT, 6 * sizeof(float), 0);
va1.linkAttrib(vb1, 1, 3, GL_FLOAT, 6 * sizeof(float), 3 * sizeof(float));
ShaderSources sources = parseShader("basic.shader");
unsigned int program = createShaderProgram(sources.vertexSource, sources.fragmentSource);
glUseProgram(program);
while (!glfwWindowShouldClose(window))
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDrawElements(GL_TRIANGLES, sizeof(indices) / sizeof(unsigned int), GL_UNSIGNED_INT, nullptr);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
And here is what the cube looks like:
You are currently using glEnable(GL_DEPTH_TEST) with glDepthFunc(GL_LESS), which means that when overlapping triangles are rendered, only fragments with a smaller z (depth) value are kept. Since your vertex positions give the back face a smaller z coordinate than the front face, all front-face fragments are discarded (their z coordinate is larger).
Solutions are:
Use glDepthFunc(GL_GREATER) instead of glDepthFunc(GL_LESS) (which may not work in your case, considering your vertices have z <= 0.0 and the depth buffer is cleared to 0.0).
Modify your vertex positions to give front-face triangles a smaller z component than back-face triangles (see the sketch just below).
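For example, the second option could look like this (a sketch of the question's vertex array with the z values swapped, assuming the vertex shader applies no projection of its own, so a smaller z means closer to the camera):
float vertices[] =
{
    // front (now at z = -0.5, i.e. closer, so it wins the GL_LESS depth test)
    -0.5f, -0.5f, -0.5f, 1.f, 0.0f, 0.5f,
     0.5f, -0.5f, -0.5f, 1.f, 0.0f, 0.5f,
     0.5f,  0.5f, -0.5f, 1.f, 0.0f, 0.5f,
    -0.5f,  0.5f, -0.5f, 1.f, 0.0f, 0.5f,
    // back (now at z = 0.0, i.e. farther away)
    -0.5f/2, -0.5f/2, 0.0f, 0.0f, 1.f, 0.0f,
     0.5f/2, -0.5f/2, 0.0f, 0.0f, 1.f, 0.0f,
     0.5f/2,  0.5f/2, 0.0f, 0.0f, 1.f, 0.0f,
    -0.5f/2,  0.5f/2, 0.0f, 0.0f, 1.f, 0.0f,
};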
I believe that when using matrix transforms, a smaller z component normally indicates the fragment is closer to the camera, which is why glDepthFunc(GL_LESS) is often used.
I have been following this DirectX tutorial: http://www.directxtutorial.com/Lesson.aspx?lessonid=9-4-5. I have just started DirectX with C++. I have drawn two rectangles on the screen but cannot translate them. The rectangles have different positions on screen and are stored in the array OurVertices. The tutorial says you first have to apply a world transformation to translate things in 3D, but I don't want to get into 3D because I have two simple 2D rectangles. How do you move simple 2D rectangles in DirectX with C++? If I apply the world transformation from the tutorial, I don't know why my camera position is tilted. My code is given below:
CUSTOMVERTEX OurVertices[] =
{
// 1
{ 0, 0, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 0), }, //meaning x,y,z,Dword
{ 100, 0, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 0), },
{ 0, 100, 0.0f, 1.0f, D3DCOLOR_XRGB(255, 255, 0), },
{ 100, 100, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 0), },
{ 200, 200, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 255), },
{ 400, 200, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 255), },
{ 200, 400, 0.0f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), },
{ 400, 400, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 255), },
};
d3ddev->CreateVertexBuffer(8 * sizeof(CUSTOMVERTEX),
0,
CUSTOMFVF,
D3DPOOL_MANAGED,
&v_buffer,
NULL);
VOID* pVoid; // the void* we were talking about
v_buffer->Lock(0, 0, (void**)&pVoid, 0); // locks v_buffer, the buffer we made earlier
memcpy(pVoid, OurVertices, sizeof(OurVertices));
v_buffer->Unlock(); // unlock v_buffer
d3ddev->SetFVF(CUSTOMFVF);
void render_frame()
{
d3ddev->Clear(0, NULL, D3DCLEAR_TARGET, D3DCOLOR_XRGB(0, 0, 0), 1.0f, 0);
d3ddev->BeginScene();
// select which vertex format we are using
// select the vertex buffer to display
d3ddev->SetFVF(CUSTOMFVF);
// SET UP THE PIPELINE
D3DXMATRIX matTranslate;
static float index = 0.0f;
index += 0.01f; // an ever-increasing float value
// build a matrix to rotate the model based on the increasing float value
D3DXMatrixTranslation(&matTranslate, 0, index , 0.0f);
// tell Direct3D about our matrix
d3ddev->SetTransform(D3DTS_WORLD, &matTranslate);
D3DXMATRIX matView; // the view transform matrix
D3DXMatrixLookAtLH(&matView,
&D3DXVECTOR3(0.0f, 0.0f, 10.0f), // the camera position
&D3DXVECTOR3(0.0f, 0.0f, 0.0f), // the look-at position
&D3DXVECTOR3(0.0f, 1.0f, 0.0f)); // the up direction
d3ddev->SetTransform(D3DTS_VIEW, &matView); // set the view transform to matView
D3DXMATRIX matProjection; // the projection transform matrix
D3DXMatrixPerspectiveFovLH(&matProjection,
D3DXToRadian(100), // the horizontal field of view
(FLOAT)800 / (FLOAT)600, // aspect ratio
1.0f, // the near view-plane
100.0f); // the far view-plane
d3ddev->SetTransform(D3DTS_PROJECTION, &matProjection); // set the projection
// select the vertex buffer to display
d3ddev->SetStreamSource(0, v_buffer, 0, sizeof(CUSTOMVERTEX));
// copy the vertex buffer to the back buffer
d3ddev->DrawPrimitive(D3DPT_TRIANGLESTRIP, 0, 2);
d3ddev->DrawPrimitive(D3DPT_TRIANGLESTRIP, 4, 2);
d3ddev->EndScene();
d3ddev->Present(NULL, NULL, NULL, NULL);
}
Is there any way to translate two rectangles?
You are already working in 3D: when you specify the position of your vertices, you have an extra z component which is set to 0 at the moment. You can change it and see the effect on your scene.
{ 0, 0, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 0), }, //meaning x,y,z,Dword
{ 100, 0, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 0), },
{ 0, 100, 0.0f, 1.0f, D3DCOLOR_XRGB(255, 255, 0), },
{ 100, 100, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 0), },
{ 200, 200, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 255), },
{ 400, 200, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 255), },
{ 200, 400, 0.0f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), },
{ 400, 400, 0.0f, 0.0f, D3DCOLOR_XRGB(255, 255, 255), },
So now each frame you are translating your vertices by an increasing amount on the y axis.
static float index = 0.0f;
index += 0.01f; // an ever-increasing float value
// build a matrix to rotate the model based on the increasing float value
D3DXMatrixTranslation(&matTranslate, 0, index , 0.0f);
If you print index each frame you will see it grow, and this value is used to build a translation matrix which is applied before the view/projection transforms.
I don't know what effect you want, but all the vertices of your scene will move from bottom to top, which can look as if your camera is going down.
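If all you want is 2D movement in pixel coordinates, one option (not from the tutorial, just a sketch using the D3DX helpers) is to replace the perspective camera with an off-center orthographic projection, so that vertex positions map directly to screen pixels and the translation matrix simply slides the rectangles around:
// Orthographic projection mapping x to [0, 800] and y to [0, 600]
// (note that y increases upwards here). With this in place,
// D3DXMatrixTranslation(&matTranslate, dx, dy, 0.0f) moves a rectangle
// dx pixels right and dy pixels up, with no perspective tilt.
D3DXMATRIX matOrtho;
D3DXMatrixOrthoOffCenterLH(&matOrtho, 0.0f, 800.0f, 0.0f, 600.0f, 0.0f, 1.0f);
d3ddev->SetTransform(D3DTS_PROJECTION, &matOrtho);
// An identity view matrix replaces the look-at camera
D3DXMATRIX matView;
D3DXMatrixIdentity(&matView);
d3ddev->SetTransform(D3DTS_VIEW, &matView);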
I have been trying to learn the basics of DirectX 11 programming using the MSDN tutorial05 sample code and I have run into an issue I cannot find a solution for on the internet (that I could see anyway). Basically I am trying to draw and render a player cube object, complete with user input, and pyramid-like objects that the player must collect.
My issue is that when I am rendering the scene, only the cube vertex (and indices) data is being read so all objects are cubes when they shouldn't be.
This is the function where the vertex data is made:
PyramidVertex Pyramid[] =
{
// Square base of the pyramid
{ XMFLOAT3( -0.5f, -0.5f, 0.5f), XMFLOAT4(0.0f, 1.0f, 0.0f, 1.0f) },
{ XMFLOAT3( 0.5f, -0.5f, 0.5f), XMFLOAT4(0.0f, 0.0f, 1.0f, 1.0f) },
{ XMFLOAT3(-0.5f, -0.5f, -0.5f), XMFLOAT4(1.0f, 0.0f, 0.0f, 1.0f) },
{ XMFLOAT3(0.5f, -0.5f, -0.5f), XMFLOAT4(0.0f, 1.0f, 1.0f, 1.0f) },
// The tip of the pyramid
{ XMFLOAT3(0.0f, 0.5f, 0.0f), XMFLOAT4(0.0f, 1.0f, 0.0f, 1.0f) },
};
D3D11_BUFFER_DESC bdP;
ZeroMemory(&bdP, sizeof(bdP));
bdP.Usage = D3D11_USAGE_DEFAULT;
bdP.ByteWidth = sizeof(PyramidVertex) * 5;
bdP.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bdP.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA InitPData;
ZeroMemory(&InitPData, sizeof(InitPData));
InitPData.pSysMem = Pyramid;
hr = g_pd3dDevice->CreateBuffer(&bdP, &InitPData, &g_pVertexBufferP);
if (FAILED(hr))
return hr;
// Set vertex buffer
UINT pStride = sizeof(PyramidVertex);
UINT pOffset = 1;
g_pImmediateContext->IASetVertexBuffers(0, 1, &g_pVertexBufferP, &pStride, &pOffset);
// create the index buffer
DWORD pIndex[] =
{
0, 2, 1,
1, 2, 3,
0, 1, 4,
1, 3, 4,
3, 2, 4,
2, 0, 4,
};
// create the index buffer
bdP.Usage = D3D11_USAGE_DYNAMIC;
bdP.ByteWidth = sizeof(DWORD) * 18;
bdP.BindFlags = D3D11_BIND_INDEX_BUFFER;
bdP.CPUAccessFlags = D3D11_CPU_ACCESS_WRITE;
bdP.MiscFlags = 0;
InitPData.pSysMem = pIndex;
hr = g_pd3dDevice->CreateBuffer(&bdP, &InitPData, &g_pIndexBufferP);
if (FAILED(hr))
return hr;
// Set index buffer
g_pImmediateContext->IASetIndexBuffer(g_pIndexBufferP, DXGI_FORMAT_R16_UINT, 0);
// Set primitive topology
g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
// Create vertex buffer
SimpleVertex vertices[] =
{
{ XMFLOAT3( -1.0f, 1.0f, -1.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( 1.0f, 1.0f, -1.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( 1.0f, 1.0f, 1.0f ), XMFLOAT4( 0.0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( -1.0f, 1.0f, 1.0f ), XMFLOAT4( .0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( -1.0f, -1.0f, -1.0f ), XMFLOAT4 (1.0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( 1.0f, -1.0f, -1.0f ), XMFLOAT4( 1.0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( 1.0f, -1.0f, 1.0f ), XMFLOAT4( 0.0f, 1.0f, 1.0f, 1.0f ) },
{ XMFLOAT3( -1.0f, -1.0f, 1.0f ), XMFLOAT4( .0f, 1.0f, 1.0f, 1.0f ) },
};
D3D11_BUFFER_DESC bd;
ZeroMemory( &bd, sizeof(bd) );
bd.Usage = D3D11_USAGE_DEFAULT;
bd.ByteWidth = sizeof( SimpleVertex ) * 8;
bd.BindFlags = D3D11_BIND_VERTEX_BUFFER;
bd.CPUAccessFlags = 0;
D3D11_SUBRESOURCE_DATA InitData;
ZeroMemory( &InitData, sizeof(InitData) );
InitData.pSysMem = vertices;
hr = g_pd3dDevice->CreateBuffer( &bd, &InitData, &g_pVertexBuffer );
if( FAILED( hr ) )
return hr;
// Set vertex buffer
UINT stride = sizeof( SimpleVertex );
UINT offset = 0;
g_pImmediateContext->IASetVertexBuffers( 0, 1, &g_pVertexBuffer, &stride, &offset );
// Create index buffer
WORD indices[] =
{
3,1,0,
2,1,3,
0,5,4,
1,5,0,
3,4,7,
0,4,3,
1,6,5,
2,6,1,
2,7,6,
3,7,2,
6,4,5,
7,4,6,
};
bd.Usage = D3D11_USAGE_DEFAULT;
bd.ByteWidth = sizeof( WORD ) * 36; // 36 vertices needed for 12 triangles in a triangle list
bd.BindFlags = D3D11_BIND_INDEX_BUFFER;
bd.CPUAccessFlags = 0;
InitData.pSysMem = indices;
hr = g_pd3dDevice->CreateBuffer( &bd, &InitData, &g_pIndexBuffer );
if( FAILED( hr ) )
return hr;
// Set index buffer
g_pImmediateContext->IASetIndexBuffer( g_pIndexBuffer, DXGI_FORMAT_R16_UINT, 0 );
// Set primitive topology
g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
// Create the constant buffer
bd.Usage = D3D11_USAGE_DEFAULT;
bd.ByteWidth = sizeof(ConstantBuffer);
bd.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
bd.CPUAccessFlags = 0;
hr = g_pd3dDevice->CreateBuffer( &bd, nullptr, &g_pConstantBuffer );
if( FAILED( hr ) )
return hr;
// Create the constant buffer
bdP.Usage = D3D11_USAGE_DEFAULT;
bdP.ByteWidth = sizeof(ConstantBuffer);
bdP.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
bdP.CPUAccessFlags = 0;
hr = g_pd3dDevice->CreateBuffer(&bdP, nullptr, &g_pConstantBufferP);
if (FAILED(hr))
return hr;
// Initialize the world matrix
g_Player = XMMatrixIdentity();
for (int i = 0; i < 10; ++i)
{
g_Shapes[i] = XMMatrixIdentity();
}
// Initialize the view matrix
XMVECTOR Eye = XMVectorSet( 0.0f, 1.0f, -5.0f, 0.0f );
XMVECTOR At = XMVectorSet( 0.0f, 1.0f, 0.0f, 0.0f );
XMVECTOR Up = XMVectorSet( 0.0f, 4.0f, 0.0f, 0.0f );
g_View = XMMatrixLookAtLH( Eye, At, Up );
// Initialize the projection matrix
g_Projection = XMMatrixPerspectiveFovLH( XM_PIDIV2, width / (FLOAT)height, 0.01f, 100.0f );
I believe that the issue is somewhere here, and my theory is that the pyramid state on g_pImmediateContext is being overwritten when it comes to drawing the cubes. If this is the case, then I have no clue how to solve, or research, this problem. It has taken me an hour to figure out how to put my code on this page properly, but I got weird results, so I will leave a link to my Google Drive containing this code if someone wants to have an in-depth look (for whatever reason) at the code.
This is the render function:
//
// Clear the back buffer
//
g_pImmediateContext->ClearRenderTargetView(g_pRenderTargetView, Colors::Black);
//
// Clear the depth buffer to 1.0 (max depth)
//
g_pImmediateContext->ClearDepthStencilView(g_pDepthStencilView, D3D11_CLEAR_DEPTH, 1.0f, 0);
XMMATRIX mRotate = XMMatrixRotationZ(DXGame->playerUser->getRotation());
XMMATRIX mTranslate = XMMatrixTranslation(DXGame->playerUser->getXpos(), DXGame->playerUser->getYpos(), DXGame->playerUser->getZpos());
XMMATRIX mScale = XMMatrixScaling(0.7f, 0.7f, 0.7f);
g_Player = mScale * mRotate * mTranslate;
ConstantBuffer cb1;
cb1.mWorld = XMMatrixTranspose(g_Player);
cb1.mView = XMMatrixTranspose(g_View);
cb1.mProjection = XMMatrixTranspose(g_Projection);
g_pImmediateContext->UpdateSubresource(g_pConstantBuffer, 0, nullptr, &cb1, 0, 0);
g_pImmediateContext->VSSetShader(g_pVertexShader, nullptr, 0);
g_pImmediateContext->VSSetConstantBuffers(0, 1, &g_pConstantBuffer);
g_pImmediateContext->PSSetShader(g_pPixelShader, nullptr, 0);
g_pImmediateContext->DrawIndexed(36, 0, 0);
for (int i = 0; i < 10; i++)
{
XMMATRIX sRotate = XMMatrixRotationY((DXGame->pickUps[i].rotation += 0.001f));
XMMATRIX sTranslate = XMMatrixTranslation(DXGame->pickUps[i].xPos, DXGame->pickUps[i].yPos, DXGame->pickUps[i].zPos);
XMMATRIX sScale = XMMatrixScaling(0.2f, 0.2f, 0.2f);
g_Shapes[i] = sScale * sRotate * sTranslate;
ConstantBuffer constB;
constB.mWorld = XMMatrixTranspose(g_Shapes[i]);
constB.mView = XMMatrixTranspose(g_View);
constB.mProjection = XMMatrixTranspose(g_Projection);
g_pImmediateContext->UpdateSubresource(g_pConstantBufferP, 0, nullptr, &constB, 0, 0);
g_pImmediateContext->VSSetShader(g_pVertexShader, nullptr, 0);
g_pImmediateContext->VSSetConstantBuffers(0, 1, &g_pConstantBufferP);
g_pImmediateContext->PSSetShader(g_pPixelShader, nullptr, 0);
g_pImmediateContext->DrawIndexed(18, 0, 0);
}
g_pSwapChain->Present(0, 0);
Something I am also looking at is constant buffers and HLSL to see if that is an issue as well.
Please could someone at least point me in the right direction as this issue has bugged me for almost 2 months now (I left it this long because I wanted to figure it out for myself but now I am desperate for a solution).
Thank you for taking the time to read this post. Sorry it's so long, but I needed to get as much info out there as possible in the hope that it makes the problem easier to diagnose.
Your calls to IASetIndexBuffer and IASetVertexBuffers are in your creation routines. They need to be in your render function (before the relevant Draw call), as they attach those buffers to the pipeline for drawing.
They do not need to be in the creation code at all, since in DirectX 11 the context (which builds commands) and the device (which creates resources) are decoupled.
You should have, in the render loop:
// Set vertex buffer and index buffer for your cube
UINT stride = sizeof( SimpleVertex );
UINT offset = 0;
g_pImmediateContext->IASetVertexBuffers( 0, 1, &g_pVertexBuffer, &stride,
&offset );
// Set index buffer
g_pImmediateContext->IASetIndexBuffer( g_pIndexBuffer, DXGI_FORMAT_R16_UINT,
0 );
g_pImmediateContext->VSSetShader(g_pVertexShader, nullptr, 0);
g_pImmediateContext->VSSetConstantBuffers(0, 1, &g_pConstantBuffer);
g_pImmediateContext->PSSetShader(g_pPixelShader, nullptr, 0);
g_pImmediateContext->DrawIndexed(36, 0, 0);
and just before drawing all the pyramids:
// Set vertex buffer and index buffer for the pyramids. As you will draw them 10 times,
// you can do this once just before the loop, since the geometry will not change.
UINT pStride = sizeof( PyramidVertex );
UINT pOffset = 0;
g_pImmediateContext->IASetVertexBuffers( 0, 1, &g_pVertexBufferP, &pStride, &pOffset );
// Set index buffer
g_pImmediateContext->IASetIndexBuffer( g_pIndexBufferP, DXGI_FORMAT_R16_UINT, 0 );
for (int i = 0; i < 10; i++)
{
//Same draw code as before
}
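More generally, since state set on the immediate context persists until it is overwritten, each object's draw call must be preceded by binding that object's own buffers. A hypothetical helper (names borrowed from the question's globals) illustrating the per-object pattern:
// Hypothetical per-object draw helper: bind geometry, upload constants, draw
void DrawObject(ID3D11Buffer* vb, UINT stride, ID3D11Buffer* ib,
                ID3D11Buffer* cb, const ConstantBuffer& cbData, UINT indexCount)
{
    UINT offset = 0;
    g_pImmediateContext->IASetVertexBuffers(0, 1, &vb, &stride, &offset);
    g_pImmediateContext->IASetIndexBuffer(ib, DXGI_FORMAT_R16_UINT, 0);
    g_pImmediateContext->UpdateSubresource(cb, 0, nullptr, &cbData, 0, 0);
    g_pImmediateContext->VSSetConstantBuffers(0, 1, &cb);
    g_pImmediateContext->DrawIndexed(indexCount, 0, 0);
}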
I have an Object class that keeps track of the object's scale, translation and everything else, and I want it to set those transform matrices when it draws (obviously), but for some reason, despite setting the transform and everything, it does not work.
---------- IN OBJECT------------------
vertices = new Vertex[vertexCount];
scaleX = 100.0f;
scaleY = 100.0f;
scaleZ = 100.0f;
vertices[0] = { 100.0f, 0.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 1, 0, };
vertices[1] = { 100.0f, 100.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 1, 1, };
vertices[2] = { 0.0f, 100.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 0, 1, };
vertices[3] = { 0.0f, 0.0f, 0.5f, 1.0f, D3DCOLOR_XRGB(255, 255, 255), 0, 0, };
v_buffer = NULL;
i_buffer = NULL;
texture = 0;
d3ddev->CreateVertexBuffer(vertexCount * sizeof(Vertex),
0,
CUSTOMFVF,
D3DPOOL_MANAGED,
&v_buffer,
NULL);
short indices[] =
{
0, 1, 2, // side 1
2, 3, 0,
};
// create an index buffer interface called i_buffer
d3ddev->CreateIndexBuffer(6 * sizeof(short),
0,
D3DFMT_INDEX16,
D3DPOOL_MANAGED,
&i_buffer,
NULL);
// lock i_buffer and load the indices into it
i_buffer->Lock(0, 0, (void**)&pVoid, 0);
memcpy(pVoid, indices, sizeof(indices));
i_buffer->Unlock();
// lock v_buffer and load the vertices into it
v_buffer->Lock(0, 0, (void**)&pVoid, 0);
memcpy(pVoid, vertices, sizeof(Vertex)* vertexCount);
v_buffer->Unlock();
-----------DRAW FUNCTION --------------
D3DXVECTOR3 pos = { obj.GetX(), obj.GetY(), obj.GetZ() };
d3ddev->SetFVF(CUSTOMFVF);
// select the vertex buffer to display
d3ddev->SetStreamSource(0, obj.GetVBuffer(), 0, sizeof(Vertex));
d3ddev->SetIndices(obj.GetIBuffer());
D3DXMatrixRotationYawPitchRoll(&obj.rotationTransform, obj.GetRotationX(), obj.GetRotationY(), obj.GetRotationZ());
D3DXMatrixTranslation(&obj.translationTransform, obj.GetX(), obj.GetY(), obj.GetZ());
D3DXMatrixScaling(&obj.scalingTransform, obj.GetScaleX(), obj.GetScaleY(), obj.GetScaleZ());
D3DXMatrixMultiply(&obj.worldTransform, &obj.scalingTransform, &obj.translationTransform);
D3DXMatrixMultiply(&obj.worldTransform, &obj.rotationTransform, &obj.worldTransform);
//obj.worldTransform = obj.rotationTransform * obj.scalingTransform * obj.translationTransform;
d3ddev->SetTransform(D3DTS_WORLD, &obj.worldTransform);
D3DXMatrixLookAtRH(&obj.viewTransform, &D3DXVECTOR3(0, 0, 10), &D3DXVECTOR3(0, 0, 0), &D3DXVECTOR3(0, 0, 0));
d3ddev->SetTransform(D3DTS_VIEW, &obj.viewTransform);
D3DXMatrixPerspectiveFovRH(&obj.projectionTransform, D3DXToRadian(90), (float)SCREEN_WIDTH / (float)SCREEN_HEIGHT, 1.0f, 10.0f);
d3ddev->SetTransform(D3DTS_PROJECTION, &obj.projectionTransform);
// copy the vertex buffer to the back buffer
d3ddev->DrawIndexedPrimitive(D3DPT_TRIANGLELIST, 0, 0, 4, 0, 2);
Okay, you are doing the matrix multiplication in the wrong order. Matrix multiplication is not commutative, that is, A*B != B*A. Change the order you multiply them in from rotation*scale*translate to scale*rotation*translate.
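In terms of the question's own D3DX calls, a minimal sketch of the corrected order (D3DX uses the row-vector convention, so the transform applied first goes leftmost in the product):
// scale first, then rotate, then translate: world = S * R * T
D3DXMatrixMultiply(&obj.worldTransform, &obj.scalingTransform, &obj.rotationTransform);
D3DXMatrixMultiply(&obj.worldTransform, &obj.worldTransform, &obj.translationTransform);
d3ddev->SetTransform(D3DTS_WORLD, &obj.worldTransform);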
Hope that helps.
I just want to do simple Gouraud shading with some different colors on my objects. Basically, my problem is that I can't get the color values into the shaders; everything just renders black. If I define a vector in the shader as a color for the whole object, it works fine.
Main program:
protected void initOpenGL() {
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);
startTime = Sys.getTime();
m = new Matrix4f();
m.m00 = 1;
m.m11 = 1;
m.m22 = -(101.0f / 99);
m.m32 = -(200.0f / 99);
m.m23 = -1;
m.m33 = 0;
makeCube();
sp = new ShaderProgram("gouraud");
glBindAttribLocation(sp.getId(), 0, "corners");
glBindAttribLocation(sp.getId(), 1, "colors");
}
@Override
protected void render() {
gamma = gamma + 1;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
Matrix4f newMat = new Matrix4f(m);
Matrix4f.translate(new Vector3f(0, 0, -5), newMat, newMat);
newMat.rotate(gamma/100, new Vector3f(0, 1, 0));
newMat.rotate(gamma/200, new Vector3f(1, 0, 0));
FloatBuffer fb = BufferUtils.createFloatBuffer(16);
newMat.store(fb);
fb.flip();
GL20.glUniformMatrix4(GL20.glGetUniformLocation(sp.getId(), "matrix"),
false, fb);
GL20.glUseProgram(sp.getId());
glBindVertexArray(vaoId);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glDrawArrays(GL_QUADS, 0, corners.length/3);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(0);
glBindVertexArray(0);
}
private void makeCube(){
corners = new float[] {
// cube
// front
-1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1,
// left
-1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1,
// bottom
-1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, 1,
// right
1, -1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1,
// top
-1, 1, 1, 1, 1, 1, 1, 1, -1, -1, 1, -1,
// back
-1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1,
};
FloatBuffer eckenBuffer = BufferUtils.createFloatBuffer(corners.length);
eckenBuffer.put(corners);
eckenBuffer.flip();
vaoId = glGenVertexArrays();
glBindVertexArray(vaoId);
int vboId = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, vboId);
glBufferData(GL_ARRAY_BUFFER, eckenBuffer, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
colors = new float[] {
// front
1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f,
// right
1.0f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f,
// back
1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f,
// left
0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f,
// top
0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f,
// bottom
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f, 1.0f, 0.0f };
FloatBuffer colorBuffer = BufferUtils.createFloatBuffer(colors.length);
colorBuffer.put(colors);
colorBuffer.flip();
int vboIdB = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, vboIdB);
glBufferData(GL_ARRAY_BUFFER, colorBuffer, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, false, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
Shader:
Vertex
#version 150
in vec4 corners;
in vec4 colors;
vec4 colorTest = vec4(1.0,0.0,0.0,1.0);
out vec4 colorToFrag;
uniform mat4 matrix;
void main(void) {
colorToFrag = colors ;
gl_Position = matrix * corners;
}
Fragment
#version 150
in vec4 colorToFrag;
out vec4 colorOut;
void main(void) {
colorOut = colorToFrag;
}
You didn't post the code for your shader compilation/linking, but since you call glBindAttribLocation and never call glLinkProgram in the code shown, I make the educated guess that the linking takes place only in the ShaderProgram constructor.
The glBindAttribLocation calls will only affect linking operations that come after them (obviously). So your location bindings are not effective at all; the GL assigns the locations itself.
Now if you don't use the colors attribute in the shader, it will be optimized out and the attribute will not be active, so it will get no location at all, and corners is likely to get index 0, as you expect. Note that the GL is not required to assign attribute locations sequentially, beginning from zero, but most implementations do.
If you actually use 'colors', it might end up with location 0, breaking your rendering completely.
As another side note: on NVIDIA, I have observed that attribute locations assigned by the GL seem to be lexicographically ordered by the variable names in the shader, so 'colors' would come before 'corners'. I'm not sure if that was just a coincidence, and I don't know how other implementations handle it.
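For completeness, a minimal sketch of the fix, shown with the C API (the LWJGL bindings mirror it one-to-one; sp.getId() from the question would be the program handle here):
// Bind the attribute locations BEFORE linking, so they take effect at link time
glBindAttribLocation(program, 0, "corners");
glBindAttribLocation(program, 1, "colors");
glLinkProgram(program);
// Alternatively, let the GL choose and query the locations after linking:
GLint cornersLoc = glGetAttribLocation(program, "corners");
GLint colorsLoc = glGetAttribLocation(program, "colors");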