I seem to have run into a strange issue with OpenGL. Everything works fine with my class until I make the map too big (around 800x800 is the maximum), at which point OpenGL stops drawing anything at all. I have made calls to glGetBufferSubData, and as far as I can tell the data is correct in both the vertex and index buffers, yet nothing is drawn. At first I suspected an overflow somewhere in my code, but according to std::numeric_limits my vertex and index iterators don't come anywhere close to the maximum value of a (signed) int. I use a lot of wrapper classes around OpenGL objects, but they are very thin, usually just inline calls to their OpenGL equivalents. The same goes for the "M_" typedefs around primitive types. Below are the main loop I render in, the class where I believe the issue lies, and two screenshots of the output.
Correct output: http://i.imgur.com/cvC1T7L.png
Blank output, after expanding the map: http://i.imgur.com/MmmNgj4.png
Main loop:
int main(){
//open window
Memento::MainWindow& main_window = Memento::MainWindow::GetInstance();
Memento::MainWindow::Init();
main_window.SetTitle("Memento");
main_window.Open();
//matrices
glmx_mat4 ortho_matrix = {};
glmx_mat4_ortho(0.0f, 800.0f, 600.0f, 0.0f, 5.0f, 25.0f, ortho_matrix);
glmx_mat4 modelview_matrix = {};
glmx_mat4_identity(modelview_matrix);
glmx_vec3 translate_vec = {0.0f, 0.0f, -10.0f};
glmx_mat4_translate(modelview_matrix, translate_vec, modelview_matrix);
glmx_mat4_multiply(ortho_matrix, modelview_matrix, ortho_matrix);
//shaders
Memento::GLShader default_vert_shader("default.vert", GL_VERTEX_SHADER);
default_vert_shader.Compile();
Memento::GLShader default_frag_shader("default.frag", GL_FRAGMENT_SHADER);
default_frag_shader.Compile();
//program
Memento::GLProgram default_program;
default_program.Create();
default_program.AttachShader(default_vert_shader);
default_program.AttachShader(default_frag_shader);
Memento::GLVertexArray default_vert_array;
default_vert_array.Create();
default_vert_array.Bind();
//BufferGameMap class- where I believe the issue lies
Memento::TextureAtlas atlas1("atlas/cat_image.png", "atlas/cat_source.xml");
Memento::BufferGameMap map1("tryagain.tmx", atlas1);
//bind buffers
map1.GetVertexBuffer().Bind();
map1.GetIndexBuffer().Bind();
//upload vertex attributes
default_vert_array.EnableIndex(0);
default_vert_array.IndexData(0, 2, GL_FLOAT, NULL, 8 * sizeof(Memento::M_float));
default_vert_array.BindIndex(default_program, 0, "map_vert");
//link, validate, and use program
default_program.Link();
default_program.Validate();
default_program.Use();
//upload matrix as uniform
glUniformMatrix4fv(default_program.GetUniformLocation("modelviewprojection_matrix"),
1, GL_FALSE, ortho_matrix);
//main draw loop
while(not glfwGetKey(GLFW_KEY_ESC)){
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDrawElements(GL_TRIANGLES, map1.GetIndexBufferLength(), GL_UNSIGNED_INT, NULL);
glfwSwapBuffers();
}
//close window & exit
main_window.Close();
return (0);
}
BufferGameMap class (issue is probably here!):
Memento::BufferGameMap::BufferGameMap(std::string const& file, const Memento::TextureAtlas& atlas):
TmxMap::GameMap(), background_color_color4(), vertex_buffer(), index_buffer(),
vertex_buffer_len(0), index_buffer_len(0){
Create(file, atlas);
}
Memento::M_void Memento::BufferGameMap::Create(std::string const& file, const Memento::TextureAtlas& atlas){
if(IsCreated())Destroy();
TmxMap::GameMap::CreateFromFile(file);
std::vector<TmxMap::Layer> const& layers = GetLayers();
if(not layers.empty()){
const std::vector<TmxMap::Layer>::const_iterator layers_end = layers.end();
std::vector<TmxMap::Layer>::const_iterator layers_iter = layers.begin();
Memento::M_float* vertex_buffer_data = NULL;
Memento::M_uint* index_buffer_data = NULL;
for(; layers_iter != layers_end; ++layers_iter){
vertex_buffer_len += layers_iter -> GetMapTiles().size() * (4 * (2 +
2 + 2 + 2));
index_buffer_len += layers_iter -> GetMapTiles().size() * 6;
}
vertex_buffer_data = new Memento::M_float[vertex_buffer_len];
index_buffer_data = new Memento::M_uint[index_buffer_len];
//fill data to send to the gl
Memento::M_sizei vertex_buffer_iter = 0, index_buffer_iter = 0, index_buffer_quad_iter = 0;
//map data
const Memento::M_uint map_size_x = GetMapSize().x, map_size_y = GetMapSize().y;
const Memento::M_float map_tile_size_x = GetTileSize().x, map_tile_size_y = GetTileSize().y;
//per layer data
std::vector<TmxMap::MapTile> const* map_tiles = NULL;
std::vector<TmxMap::MapTile>::const_iterator map_tiles_iter, map_tiles_end;
//per tile data
Memento::M_float map_origin_x = 0.0f, map_origin_y = 0.0f;
for(layers_iter = layers.begin(); layers_iter != layers_end; ++layers_iter){
map_tiles = &layers_iter -> GetMapTiles();
for(map_tiles_iter = map_tiles -> begin(), map_tiles_end = map_tiles -> end();
map_tiles_iter != map_tiles_end; ++map_tiles_iter,
vertex_buffer_iter += 4 * (2 + 2 + 2 +
2), index_buffer_iter += 6,
index_buffer_quad_iter += 4){
map_origin_x = static_cast<Memento::M_float>(map_tiles_iter -> map_tile_index /
map_size_y) * map_tile_size_x;
map_origin_y = static_cast<Memento::M_float>(map_tiles_iter -> map_tile_index %
map_size_y) * map_tile_size_y;
vertex_buffer_data[vertex_buffer_iter] = map_origin_x;
vertex_buffer_data[vertex_buffer_iter + 1] = map_origin_y;
//=========================================================
vertex_buffer_data[vertex_buffer_iter + 8] = map_origin_x;
vertex_buffer_data[vertex_buffer_iter + 9] = map_origin_y + map_tile_size_y;
//=========================================================
vertex_buffer_data[vertex_buffer_iter + 16] = map_origin_x + map_tile_size_x;
vertex_buffer_data[vertex_buffer_iter + 17] = map_origin_y + map_tile_size_y;
//=========================================================
vertex_buffer_data[vertex_buffer_iter + 24] = map_origin_x + map_tile_size_x;
vertex_buffer_data[vertex_buffer_iter + 25] = map_origin_y;
//=========================================================
index_buffer_data[index_buffer_iter] = index_buffer_quad_iter;
index_buffer_data[index_buffer_iter + 1] = index_buffer_quad_iter + 1;
index_buffer_data[index_buffer_iter + 2] = index_buffer_quad_iter + 2;
index_buffer_data[index_buffer_iter + 3] = index_buffer_quad_iter;
index_buffer_data[index_buffer_iter + 4] = index_buffer_quad_iter + 2;
index_buffer_data[index_buffer_iter + 5] = index_buffer_quad_iter + 3;
}
}
vertex_buffer.Create(GL_ARRAY_BUFFER, GL_STATIC_DRAW);
vertex_buffer.Bind();
vertex_buffer.AllocateRef(vertex_buffer_len * sizeof(Memento::M_float),
static_cast<const Memento::M_void*>(vertex_buffer_data));
vertex_buffer.Unbind();
index_buffer.Create(GL_ELEMENT_ARRAY_BUFFER, GL_STATIC_DRAW);
index_buffer.Bind();
index_buffer.AllocateRef(index_buffer_len * sizeof(Memento::M_uint),
static_cast<const Memento::M_void*>(index_buffer_data));
index_buffer.Unbind();
delete[] vertex_buffer_data;
delete[] index_buffer_data;
}
}
Vertex shader:
#version 140
precision highp float;
uniform mat4 modelviewprojection_matrix;
in vec2 map_vert;
void main(){
gl_Position = modelviewprojection_matrix * vec4(map_vert, 0, 1);
}
Fragment shader:
#version 140
precision highp float;
out vec4 frag_color;
void main(){
frag_color = vec4(0.4, 0.2, 0.6, 0.5);
}
I think you are running out of stack memory.
By allocating the data on the heap you can use all the memory available to your process, while the stack is typically limited to about 1 MB.
In other words: move the object allocations out of main's local scope, either onto the heap or into global scope.
Memento::TextureAtlas * atlas1;//("atlas/cat_image.png", "atlas/cat_source.xml");
Memento::BufferGameMap * map1;//("tryagain.tmx", atlas1);
int main(){
atlas1 = new Memento::TextureAtlas("atlas/cat_image.png", "atlas/cat_source.xml");
map1 = new Memento::BufferGameMap("tryagain.tmx", *atlas1); //the constructor takes a reference, so dereference the pointer
//.... access with ->
}
or if this will not cause compiler errors:
Memento::TextureAtlas atlas1("atlas/cat_image.png", "atlas/cat_source.xml");
Memento::BufferGameMap map1("tryagain.tmx", atlas1);
int main(){
//.... access with .
}
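A third option, if C++14 is available, keeps the objects heap-allocated but still scoped to main via smart pointers; this is only a sketch, assuming the same constructors shown above:
#include <memory>

int main(){
    // Heap allocation, so the large objects never live on the stack,
    // but ownership and lifetime stay inside main.
    auto atlas1 = std::make_unique<Memento::TextureAtlas>("atlas/cat_image.png", "atlas/cat_source.xml");
    auto map1 = std::make_unique<Memento::BufferGameMap>("tryagain.tmx", *atlas1);
    //.... access with ->
}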
Related
I have two different methods for adding elements to a vector.
GUI_Vertices.emplace_back();
GUI_Vertices.back().pos.x = ((float)x / 400) - 1.f;
GUI_Vertices.back().pos.y = ((float)y / 300) - 1.f;
GUI_Vertices.back().texCoord.x = u;
GUI_Vertices.back().texCoord.y = v;
GUI_Vertices.back().color.r = m_Color.r / 128;
GUI_Vertices.back().color.g = m_Color.g / 128;
GUI_Vertices.back().color.b = m_Color.b / 128;
GUI_Vertices.back().color.a = m_Color.a / 128;
The above code works; however, it forces me to always add a new element to the GUI_Vertices vector.
Vertex NewVertex;
NewVertex.pos.x = ((float)x / 400) - 1.f;
NewVertex.pos.y = ((float)y / 300) - 1.f;
NewVertex.texCoord.x = u;
NewVertex.texCoord.y = v;
NewVertex.color.r = m_Color.r / 128;
NewVertex.color.g = m_Color.g / 128;
NewVertex.color.b = m_Color.b / 128;
NewVertex.color.a = m_Color.a / 128;
GUI_Vertices.emplace_back(NewVertex);
The above code only works sometimes, but it lets me conditionally add NewVertex to the GUI_Vertices vector when needed.
Here is the definition of Vertex:
struct Vertex {
glm::vec3 pos;
glm::vec4 color;
glm::vec2 texCoord;
static VkVertexInputBindingDescription getBindingDescription() {
VkVertexInputBindingDescription bindingDescription = {};
bindingDescription.binding = 0;
bindingDescription.stride = sizeof(Vertex);
bindingDescription.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
return bindingDescription;
}
static std::array<VkVertexInputAttributeDescription, 3> getAttributeDescriptions() {
std::array<VkVertexInputAttributeDescription, 3> attributeDescriptions = {};
attributeDescriptions[0].binding = 0;
attributeDescriptions[0].location = 0;
attributeDescriptions[0].format = VK_FORMAT_R32G32B32_SFLOAT;
attributeDescriptions[0].offset = offsetof(Vertex, pos);
attributeDescriptions[1].binding = 0;
attributeDescriptions[1].location = 1;
attributeDescriptions[1].format = VK_FORMAT_R32G32B32A32_SFLOAT;
attributeDescriptions[1].offset = offsetof(Vertex, color);
attributeDescriptions[2].binding = 0;
attributeDescriptions[2].location = 2;
attributeDescriptions[2].format = VK_FORMAT_R32G32_SFLOAT;
attributeDescriptions[2].offset = offsetof(Vertex, texCoord);
return attributeDescriptions;
}
bool operator==(const Vertex& other) const {
return pos == other.pos && color == other.color && texCoord == other.texCoord;
}
};
namespace std {
template<> struct hash<Vertex> {
size_t operator()(Vertex const& vertex) const {
return ((hash<glm::vec3>()(vertex.pos) ^
(hash<glm::vec4>()(vertex.color) << 1)) >> 1) ^
(hash<glm::vec2>()(vertex.texCoord) << 1);
}
};
}
Later on in program execution, after adding all of our Vertex elements to the GUI_Vertices vector, I perform the following operation on GUI_Vertices:
memcpy(GUI_VertexAllocation->GetMappedData(), GUI_Vertices.data(), sizeof(Vertex) * GUI_Vertices.size());
I'm copying the memory from GUI_Vertices into a preallocated buffer which will be used by Vulkan to render our vertices.
Now I'm trying to figure out why the first method of adding Vertex objects to GUI_Vertices always works while the second method only sometimes works.
Here is a link to the entire project https://github.com/kklouzal/WorldEngine/blob/GUI_Indirect_Draw/Vulkan/VulkanGWEN.hpp
After recompiling the project the second method will occasionally work, so I'm clearly hitting some undefined behavior. I have checked the contents of GUI_Vertices right up to the point where we do the memcpy and the data appears to be valid, so I'm not sure what's going on.
I would like to get the second method working so I can conditionally add new vertices into the buffer.
NewVertex.pos.x = ((float)x / 400) - 1.f;
NewVertex.pos.y = ((float)y / 300) - 1.f;
...
glm::vec3 pos;
emplace_back() with no arguments will value-initialize the object it creates, which initializes all of its data members. By contrast, Vertex NewVertex; will default-initialize the object, which leaves its members uninitialized (since the GLM types have trivial default constructors).
So pos.z is uninitialized, your code never assigns it, and you end up sending uninitialized garbage to the GPU.
If you create the object with Vertex NewVertex{};, then it will be value-initialized, just like emplace_back does.
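A minimal sketch of the difference, reusing the Vertex struct and the x/y values from the question:
Vertex a;   // default-initialized: pos, color and texCoord hold indeterminate values
Vertex b{}; // value-initialized: every member zeroed, same as GUI_Vertices.emplace_back()
b.pos.x = ((float)x / 400) - 1.f;
b.pos.y = ((float)y / 300) - 1.f;
// b.pos.z is already 0.0f here, so no indeterminate data reaches the GPU
GUI_Vertices.emplace_back(b);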
I'm having an issue that I cannot seem to resolve.
I'm importing OBJ files with TinyObjLoader and copying the vertex and index buffers to GPU memory to draw the model. There are only triangles in this model: no polygons with four or more sides, and no negative indices.
The only problem is that I cannot draw the full model; only 1/2 to 2/3 of the vertices are drawn, though those that are drawn have correct positions, normals, and textures.
I split the code into two methods with #defines to illustrate the issue.
Note: a heavy amount of trial and error went into getting this far, as tutorials and educational material on DirectX 11 are scarce. If you see any issues with the way the code is structured, please feel free to comment.
Method 1 is the failing code. Here I duplicate vertices in order to give them different normals, which is essential in my model because each surface needs different shading. Note: I understand I could make this rendering more optimized with std::unordered_map, merging duplicate vertices whose normals point in the same direction.
#ifdef DUPVERTICES
std::vector<float> vertex_buffer;
std::vector<uint32_t> index_buffer;
struct T_Vertex
{
float vX;
float vY;
float vZ;
float nX;
float nY;
float nZ;
float tX;
float tY;
uint32_t vXIndex;
uint32_t vYIndex;
uint32_t vZIndex;
uint32_t nXIndex;
uint32_t nYIndex;
uint32_t nZIndex;
uint32_t tXIndex;
uint32_t tYIndex;
};
std::vector<T_Vertex> temp_vertices;
size_t index_offset = 0;
for (size_t f = 0; f < shapes[0].mesh.num_face_vertices.size(); f++) {
int fv = shapes[0].mesh.num_face_vertices[f];
// Loop over vertices in the face.
for (size_t v = 0; v < fv; v++) {
// access to vertex
tinyobj::index_t idx = shapes[0].mesh.indices[index_offset + v];
if (idx.vertex_index < 0 || idx.normal_index < 0 || idx.texcoord_index < 0)
continue;
T_Vertex temp_vertex;
temp_vertex.vX = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 0] : 0;
temp_vertex.vY = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 1] : 0;
temp_vertex.vZ = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 2] : 0;
temp_vertex.nX = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 0] : 0;
temp_vertex.nY = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 1] : 0;
temp_vertex.nZ = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 2] : 0;
temp_vertex.tX = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 0] : 0;
temp_vertex.tY = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 1] : 0;
temp_vertex.vXIndex = 3 * idx.vertex_index + 0;
temp_vertex.vYIndex = 3 * idx.vertex_index + 1;
temp_vertex.vZIndex = 3 * idx.vertex_index + 2;
temp_vertex.nXIndex = 3 * idx.normal_index + 0;
temp_vertex.nYIndex = 3 * idx.normal_index + 1;
temp_vertex.nZIndex = 3 * idx.normal_index + 2;
temp_vertex.tXIndex = 2 * idx.texcoord_index + 0;
temp_vertex.tYIndex = 2 * idx.texcoord_index + 1;
temp_vertices.push_back(temp_vertex);
}
index_offset += fv;
}
for (auto& temp_vertex : temp_vertices)
{
vertex_buffer.push_back(temp_vertex.vX);
vertex_buffer.push_back(temp_vertex.vY);
vertex_buffer.push_back(temp_vertex.vZ);
vertex_buffer.push_back(temp_vertex.nX);
vertex_buffer.push_back(temp_vertex.nY);
vertex_buffer.push_back(temp_vertex.nZ);
vertex_buffer.push_back(temp_vertex.tX); //Set to 0 for no texture
vertex_buffer.push_back(temp_vertex.tY); //Set to 0 for no texture
vertex_buffer.push_back(0.0F);
index_buffer.push_back(temp_vertex.vXIndex);
index_buffer.push_back(temp_vertex.vYIndex);
index_buffer.push_back(temp_vertex.vZIndex);
index_buffer.push_back(temp_vertex.nXIndex);
index_buffer.push_back(temp_vertex.nYIndex);
index_buffer.push_back(temp_vertex.nZIndex);
index_buffer.push_back(0);
index_buffer.push_back(0);
index_buffer.push_back(0);
}
Method 2 works (minus textures), but there are no duplicate vertices, and therefore the normal orientation is not good for rendering. However, in this configuration every single vertex is drawn in the correct place. Notice that the stride and the vertex and index buffer structures are the same in both versions; it's only the size of the buffers that differs.
#else
std::vector<float> vertex_buffer;
for (int i = 0, j = 0; i < attrib.vertices.size(); i += 3, j += 2)
{
vertex_buffer.push_back(attrib.vertices[i + 0]);
vertex_buffer.push_back(attrib.vertices[i + 1]);
vertex_buffer.push_back(attrib.vertices[i + 2]);
vertex_buffer.push_back(attrib.normals[i + 0]);
vertex_buffer.push_back(attrib.normals[i + 1]);
vertex_buffer.push_back(attrib.normals[i + 2]);
vertex_buffer.push_back(0);//attrib.texcoords[j + 0]);
vertex_buffer.push_back(0);//attrib.texcoords[j + 1]);
vertex_buffer.push_back(0.0F);
}
std::vector<UINT> index_buffer;
for (int i = 0, j = 0; i < shapes[0].mesh.indices.size(); i += 3, j += 2)
{
index_buffer.push_back(shapes[0].mesh.indices[i + 0].vertex_index);
index_buffer.push_back(shapes[0].mesh.indices[i + 1].vertex_index);
index_buffer.push_back(shapes[0].mesh.indices[i + 2].vertex_index);
index_buffer.push_back(shapes[0].mesh.indices[i + 0].normal_index);
index_buffer.push_back(shapes[0].mesh.indices[i + 1].normal_index);
index_buffer.push_back(shapes[0].mesh.indices[i + 2].normal_index);
index_buffer.push_back(0);
index_buffer.push_back(0);
index_buffer.push_back(0);
}
uint32_t vertexes_size = vertex_buffer.size() * sizeof(float);
uint32_t indexes_size = index_buffer.size() * sizeof(uint32_t);
int stride_bytes = 36;
#endif
This is where I create the buffers; it's the same code for both methods:
//Set Vertex Buffer Array
g_Mesh11.m_pMeshArray = new SDKMESH_MESH;
g_Mesh11.m_pVertexBufferArray = new SDKMESH_VERTEX_BUFFER_HEADER;
int t = g_Mesh11.m_pMeshArray[0].VertexBuffers[0];
g_Mesh11.m_pMeshArray[0].VertexBuffers[0] = 0;
D3D11_BUFFER_DESC vertex_buf_desc;
vertex_buf_desc.ByteWidth = vertexes_size;
vertex_buf_desc.Usage = D3D11_USAGE_DEFAULT;
vertex_buf_desc.BindFlags = D3D11_BIND_VERTEX_BUFFER;
vertex_buf_desc.CPUAccessFlags = 0;
vertex_buf_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA init_vertex_data;
init_vertex_data.pSysMem = &vertex_buffer[0];
dxCtr->m_pDevice->CreateBuffer(&vertex_buf_desc, &init_vertex_data, &g_Mesh11.m_pVertexBufferArray[g_Mesh11.m_pMeshArray[0].VertexBuffers[0]].pVB11);
g_Mesh11.m_pVertexBufferArray[g_Mesh11.m_pMeshArray[0].VertexBuffers[0]].StrideBytes = stride_bytes;
g_Mesh11.m_pVertexBufferArray[g_Mesh11.m_pMeshArray[0].VertexBuffers[0]].SizeBytes = vertexes_size;
//Set Index Buffer array
g_Mesh11.m_pMeshArray[0].IndexBuffer = 0;
g_Mesh11.m_pIndexBufferArray = new SDKMESH_INDEX_BUFFER_HEADER;
g_Mesh11.m_pIndexBufferArray[g_Mesh11.m_pMeshArray[0].IndexBuffer].IndexType = IT_32BIT;
D3D11_BUFFER_DESC index_buf_desc;
index_buf_desc.ByteWidth = indexes_size;
index_buf_desc.Usage = D3D11_USAGE_DEFAULT;
index_buf_desc.BindFlags = D3D11_BIND_INDEX_BUFFER;
index_buf_desc.CPUAccessFlags = 0;
index_buf_desc.MiscFlags = 0;
D3D11_SUBRESOURCE_DATA init_index_data;
init_index_data.pSysMem = &index_buffer[0];
dxCtr->m_pDevice->CreateBuffer(&index_buf_desc, &init_index_data, &g_Mesh11.m_pIndexBufferArray[g_Mesh11.m_pMeshArray[0].IndexBuffer].pIB11);
g_Mesh11.m_pIndexBufferArray[g_Mesh11.m_pMeshArray[0].IndexBuffer].SizeBytes = indexes_size;
//Set subset
SDKMESH_SUBSET v_subset;
v_subset.MaterialID = 0;
v_subset.PrimitiveType = PT_TRIANGLE_LIST;
v_subset.IndexCount = index_buffer.size();
v_subset.VertexCount = vertex_buffer.size();
v_subset.VertexStart = 0;
v_subset.IndexStart = 0;
g_Mesh11.m_pMeshArray[0].pSubsets = new uint32_t;
g_Mesh11.m_pMeshArray[0].pSubsets[0] = 0;
g_Mesh11.m_pMeshArray[0].NumSubsets = 1;
g_Mesh11.m_pSubsetArray = new SDKMESH_SUBSET;
g_Mesh11.m_pSubsetArray[g_Mesh11.m_pMeshArray[0].pSubsets[0]] = v_subset;
Additional information:
I tried lower poly count models and the issue is still there, so I am not hitting some vertex-count limit.
Here are the sizes of the arrays:
attrib.vertices.size() = 150201
attrib.normals.size() = 173712
attrib.texcoords.size() = 135956
shapes[0].mesh.indices.size() = 300978
In Method 1 in the above example
temp_vertices.size() = 300978 (matches index size, so I'm not missing any vertices in temp_vertices)
vertex_buffer.size() = index_buffer.size() = 2708802
In Method 2 in the above example
vertex_buffer.size() = index_buffer.size() = 450603
Here is the input layout
// Create our vertex input layout
const D3D11_INPUT_ELEMENT_DESC layout[] =
{
{ "POSITION", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "NORMAL", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 12, D3D11_INPUT_PER_VERTEX_DATA, 0 },
{ "TEXCOORD", 0, DXGI_FORMAT_R32G32_FLOAT, 0, 24, D3D11_INPUT_PER_VERTEX_DATA, 0 },
};
Here is the Vertex Shader
cbuffer cbPerObject : register( b0 )
{
matrix g_mWorldViewProjection : packoffset( c0 );
matrix g_mWorld : packoffset( c4 );
};
//--------------------------------------------------------------------------------------
// Input / Output structures
//--------------------------------------------------------------------------------------
struct VS_INPUT
{
float4 vPosition : POSITION;
float3 vNormal : NORMAL;
float2 vTexcoord : TEXCOORD0;
};
struct VS_OUTPUT
{
float3 vNormal : NORMAL;
float2 vTexcoord : TEXCOORD0;
float4 vPosition : SV_POSITION;
};
//--------------------------------------------------------------------------------------
// Vertex Shader
//--------------------------------------------------------------------------------------
VS_OUTPUT VSMain( VS_INPUT Input )
{
VS_OUTPUT Output;
Output.vPosition = mul( Input.vPosition, g_mWorldViewProjection );
Output.vNormal = mul( Input.vNormal, (float3x3)g_mWorld );
Output.vTexcoord = Input.vTexcoord;
return Output;
}
I realize there are inconsistencies between the input layout, the shader, and my vertex_buffer vector. Mainly, the input layout describes 32 bytes per vertex, my buffer holds 36 bytes per vertex (with three floats where the texcoord should be), and in the shader the position is a float4 while the texcoord is a float2.
The shader and the initialization code come from DXUT and I did not touch them. They draw the supplied Tiny.sdkmesh model correctly, which incidentally has a stride of 32 bytes, matching that of the input layout.
If somebody can help explain why the shader's VS_INPUT differs from the input layout, that would greatly help as well.
Changing the vertex shader to match the input layout resulted in compilation errors. Changing the input layout to add an extra component to the texcoord did not really make a difference.
Note: I tried removing the vertex_buffer.push_back(0.0F); and index_buffer.push_back(0); from the end and changing stride_bytes to 32, but it was no longer drawing vertices correctly.
I've run out of things to try and am looking for help figuring out what I'm doing wrong.
Thank you,
After dissecting the working tiny.sdkmesh file, I found what I was doing wrong.
The index buffer holds only one entry per vertex (and each vertex is 32 bytes).
I had tried this before, but not successfully; here is the working code.
I am still looking for an explanation of why the input layout differs from the vertex shader in the number of bytes:
std::vector<float> vertex_buffer;
std::vector<uint32_t> index_buffer;
struct T_Vertex
{
float vX;
float vY;
float vZ;
float nX;
float nY;
float nZ;
float tX;
float tY;
uint32_t vIndex;
};
std::vector<T_Vertex> temp_vertices;
size_t index_offset = 0;
int ind = 0;
for (size_t f = 0; f < shapes[0].mesh.num_face_vertices.size(); f++) {
int fv = shapes[0].mesh.num_face_vertices[f];
// Loop over vertices in the face.
for (size_t v = 0; v < fv; v++) {
// access to vertex
tinyobj::index_t idx = shapes[0].mesh.indices[index_offset + v];
if (idx.vertex_index < 0 || idx.normal_index < 0 || idx.texcoord_index < 0)
continue;
T_Vertex temp_vertex;
temp_vertex.vX = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 0] : 0;
temp_vertex.vY = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 1] : 0;
temp_vertex.vZ = idx.normal_index > 0 ? attrib.vertices[3 * idx.vertex_index + 2] : 0;
temp_vertex.nX = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 0] : 0;
temp_vertex.nY = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 1] : 0;
temp_vertex.nZ = idx.normal_index > 0 ? attrib.normals[3 * idx.normal_index + 2] : 0;
temp_vertex.tX = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 0] : 0;
temp_vertex.tY = idx.texcoord_index > 0 ? attrib.texcoords[2 * idx.texcoord_index + 1] : 0;
temp_vertex.vIndex = ind++; //assign the zero-based index before pushing the vertex
temp_vertices.push_back(temp_vertex);
}
index_offset += fv;
}
for (auto& temp_vertex : temp_vertices)
{
vertex_buffer.push_back(temp_vertex.vX);
vertex_buffer.push_back(temp_vertex.vY);
vertex_buffer.push_back(temp_vertex.vZ);
vertex_buffer.push_back(temp_vertex.nX);
vertex_buffer.push_back(temp_vertex.nY);
vertex_buffer.push_back(temp_vertex.nZ);
vertex_buffer.push_back(temp_vertex.tX); //Set to 0 for no texture
vertex_buffer.push_back(temp_vertex.tY); //Set to 0 for no texture
index_buffer.push_back(temp_vertex.vIndex);
}
uint32_t vertexes_size = vertex_buffer.size() * sizeof(float);
uint32_t indexes_size = index_buffer.size() * sizeof(uint32_t);
int stride_bytes = 32;
I am trying to implement Order Independent Transparency on my own. It is almost finished except for one thing... As you can see in the picture below, the OIT with MSAA is somehow wrong. I think it is because of the samples, because at each triangle edge there are 4 samples (and only at triangle edges).
Alphablending and OIT with and without MSAA
Here is also the shader code in HLSL:
Create the lists
RWByteAddressBuffer tRWFragmentList : register(u1);
void main(PS_INPUT input)
{
float2 position = (input.Pos.xy - float2(0.5,0.5)) / input.Pos.w;
uint nXPosition = position.x;
uint nYPosition = position.y;
uint vScreenAddress = nScreenWidth * nYPosition + nXPosition;
float3 Normal = normalize((float3)input.Normal);
float3 Position = (float3)input.Pos;
float4 Color = createphong(input);
//float4 Color = (float4)input.Diffuse;
// Get counter value and increment
uint nNewFragmentAddress = 0;
tRWFragmentList.InterlockedAdd(0, 44, nNewFragmentAddress);
if (nNewFragmentAddress < 1000*1000*500)
{
uint pixel = 4 + nScreenWidth * nScreenHeight * 4 + nNewFragmentAddress;
tRWFragmentList.Store(pixel + 4, asuint(Position.x));
tRWFragmentList.Store(pixel + 8, asuint(Position.y));
tRWFragmentList.Store(pixel + 12, asuint(Position.z));
tRWFragmentList.Store(pixel + 16, asuint(Normal.x));
tRWFragmentList.Store(pixel + 20, asuint(Normal.y));
tRWFragmentList.Store(pixel + 24, asuint(Normal.z));
tRWFragmentList.Store(pixel + 28, asuint(Color.r));
tRWFragmentList.Store(pixel + 32, asuint(Color.g));
tRWFragmentList.Store(pixel + 36, asuint(Color.b));
tRWFragmentList.Store(pixel + 40, asuint(Color.a));
uint output = 0;
tRWFragmentList.InterlockedExchange(vScreenAddress * 4 + 4, pixel, output);
tRWFragmentList.Store(pixel, output);
}
}
Sort the lists
RWByteAddressBuffer tRWFragmentList : register(u1);
float4 main(PS_INPUT input) : SV_Target
{
float2 position = (input.Pos.xy - float2(0.5,0.5)) / input.Pos.w;
uint nXPosition = position.x;
uint nYPosition = position.y;
uint vScreenAddress = 4+(nScreenWidth * nYPosition + nXPosition) * 4;
if (tRWFragmentList.Load(vScreenAddress) != 0)
{
uint i = vScreenAddress;
uint j = vScreenAddress;
float zMin = 0;
uint zMinPrev = i;
do
{
i = j;
zMin = asfloat(tRWFragmentList.Load(tRWFragmentList.Load(i) + 12));
zMinPrev = i;
do
{
if (asfloat(tRWFragmentList.Load(tRWFragmentList.Load(i) + 12)) > zMin)
{
zMin = asfloat(tRWFragmentList.Load(tRWFragmentList.Load(i) + 12));
zMinPrev = i;
}
i = tRWFragmentList.Load(i);
}
while (tRWFragmentList.Load(i) > 0);
//check swap
if (zMinPrev != j)
{
uint trwJ = tRWFragmentList.Load(j);
uint trwtrwMin = tRWFragmentList.Load(tRWFragmentList.Load(zMinPrev));
uint trwMin = tRWFragmentList.Load(zMinPrev);
tRWFragmentList.Store(j,trwMin);
tRWFragmentList.Store(zMinPrev,trwtrwMin);
tRWFragmentList.Store(trwMin,trwJ);
}
j = tRWFragmentList.Load(j);
}
while (tRWFragmentList.Load(j) > 0);
}
return float4(1, 0, 1, 1);
}
Render the finished picture
RWByteAddressBuffer tRWFragmentList : register(u1);
float4 main(PS_INPUT input) : SV_Target
{
float2 position = (input.Pos.xy - float2(0.5,0.5)) / input.Pos.w;
uint nXPosition = position.x;
uint nYPosition = position.y;
uint vScreenAddress = nScreenWidth * nYPosition + nXPosition;
float3 Color = float3(0.5, 0.5, 0.5);
uint nScreenAdress = vScreenAddress*4+4;
while (tRWFragmentList.Load(nScreenAdress) != 0)
{
nScreenAdress = tRWFragmentList.Load(nScreenAdress);
float4 NewColor = float4(asfloat(tRWFragmentList.Load(nScreenAdress + 28)),
asfloat(tRWFragmentList.Load(nScreenAdress + 32)),
asfloat(tRWFragmentList.Load(nScreenAdress + 36)),
asfloat(tRWFragmentList.Load(nScreenAdress + 40)));
float fZValue = asfloat(tRWFragmentList.Load(nScreenAdress + 12));
Color = NewColor.a * NewColor.rgb + (1 - NewColor.a) * Color.rgb;
}
tRWFragmentList.Store(vScreenAddress * 4 + 4, 0);
if (nXPosition == 0 && nYPosition)
{
tRWFragmentList.Store(0, 0);
}
return float4(Color.r, Color.g, Color.b, 1);
}
My idea is to store the sample number inside the list, and at the end, when I render the final picture, compare the list nodes; if they are too close together I want to check the sample number and calculate the average color.
But I don't know how to get the actual sample number...
BTW: does someone have a better idea for fixing this bug? It does not need to be a fast calculation; I don't render in real time.
You have to use SV_Coverage in the pixel shader to read a mask of which samples the fragment covers.
With it you resolve the transparency (and the MSAA in one go) by accumulating into N values (N as in MSAA Nx) according to the coverage, then averaging and outputting.
If you want to output into the MSAA surface instead, prior to the resolve, you have to use a compute shader so you can do the accumulation once and then write the N values separately.
I would go compute for everything but the actual mesh render; it is more convenient than a pixel shader for that kind of processing.
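For reference, here is a sketch of how the list-building pixel shader from the question could capture the coverage mask. The only changes are the SV_Coverage input, growing the per-fragment record from 44 to 48 bytes, and storing the mask at the end of the record; the resolve pass can then weight each fragment's color by countbits of the mask divided by the sample count. The exact offsets are assumptions layered on top of the original layout:
RWByteAddressBuffer tRWFragmentList : register(u1);
void main(PS_INPUT input, uint uCoverage : SV_Coverage)
{
    float2 position = (input.Pos.xy - float2(0.5,0.5)) / input.Pos.w;
    uint nXPosition = position.x;
    uint nYPosition = position.y;
    uint vScreenAddress = nScreenWidth * nYPosition + nXPosition;
    float3 Normal = normalize((float3)input.Normal);
    float3 Position = (float3)input.Pos;
    float4 Color = createphong(input);
    // Get counter value and increment; each record is now 48 bytes
    uint nNewFragmentAddress = 0;
    tRWFragmentList.InterlockedAdd(0, 48, nNewFragmentAddress);
    if (nNewFragmentAddress < 1000*1000*500)
    {
        uint pixel = 4 + nScreenWidth * nScreenHeight * 4 + nNewFragmentAddress;
        tRWFragmentList.Store(pixel + 4, asuint(Position.x));
        tRWFragmentList.Store(pixel + 8, asuint(Position.y));
        tRWFragmentList.Store(pixel + 12, asuint(Position.z));
        tRWFragmentList.Store(pixel + 16, asuint(Normal.x));
        tRWFragmentList.Store(pixel + 20, asuint(Normal.y));
        tRWFragmentList.Store(pixel + 24, asuint(Normal.z));
        tRWFragmentList.Store(pixel + 28, asuint(Color.r));
        tRWFragmentList.Store(pixel + 32, asuint(Color.g));
        tRWFragmentList.Store(pixel + 36, asuint(Color.b));
        tRWFragmentList.Store(pixel + 40, asuint(Color.a));
        // one bit per covered sample; countbits(uCoverage) = number of covered samples
        tRWFragmentList.Store(pixel + 44, uCoverage);
        uint output = 0;
        tRWFragmentList.InterlockedExchange(vScreenAddress * 4 + 4, pixel, output);
        tRWFragmentList.Store(pixel, output);
    }
}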
I'm working on making uniformly colored spheres for a project and I'm running into an issue. The spheres render fine, but when I try to color them with glColorPointer they stop appearing. OpenGL isn't reporting any errors when I call glGetError, so I'm at a loss as to why this would happen.
The code to generate the vertices, colors, etc.:
void SphereObject::setupVertices()
{
//determine the array sizes
//vertices per row (+1 for the repeated one at the end) * three for each coordinate
//times the number of rows
int arraySize = myNumVertices * 3;
myNumIndices = (myVerticesPerRow + 1) * myRows * 2;
myVertices = new GLdouble[arraySize];
myIndices = new GLuint[myNumIndices];
myNormals = new GLdouble[arraySize];
myColors = new GLint[myNumVertices * 4];
//use spherical coordinates to calculate the vertices
double phiIncrement = 360 / myVerticesPerRow;
double thetaIncrement = 180 / (double)myRows;
int arrayIndex = 0;
int colorArrayIndex = 0;
int indicesIndex = 0;
double x, y, z = 0;
for(double theta = 0; theta <= 180; theta += thetaIncrement)
{
//loop including the repeat for the last vertex
for(double phi = 0; phi <= 360; phi += phiIncrement)
{
//make sure that the last vertex is repeated
if(360 - phi < phiIncrement)
{
x = myRadius * sin(radians(theta)) * cos(radians(0));
y = myRadius * sin(radians(theta)) * sin(radians(0));
z = myRadius * cos(radians(theta));
}
else
{
x = myRadius * sin(radians(theta)) * cos(radians(phi));
y = myRadius * sin(radians(theta)) * sin(radians(phi));
z = myRadius * cos(radians(theta));
}
myColors[colorArrayIndex] = myColor.getX();
myColors[colorArrayIndex + 1] = myColor.getY();
myColors[colorArrayIndex + 2] = myColor.getZ();
myColors[colorArrayIndex + 3] = 1;
myVertices[arrayIndex] = x;
myVertices[arrayIndex + 1] = y;
myVertices[arrayIndex + 2] = z;
if(theta <= 180 - thetaIncrement)
{
myIndices[indicesIndex] = arrayIndex / 3;
myIndices[indicesIndex + 1] = (arrayIndex / 3) + myVerticesPerRow + 1;
indicesIndex += 2;
}
arrayIndex += 3;
colorArrayIndex += 4;
}
}
}
And the code to actually render the thing
void SphereObject::render()
{
glPushMatrix();
glPushClientAttrib(GL_CLIENT_VERTEX_ARRAY_BIT);
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(4, GL_INT, 0, myColors);
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_DOUBLE, 0, myVertices);
glDrawElements(GL_QUAD_STRIP, myNumIndices, GL_UNSIGNED_INT, myIndices);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glPopClientAttrib();
glPopMatrix();
}
Any and all help would be appreciated. I'm really having a hard time for some reason.
When you use GL_INT (or any integer type) for a color pointer, OpenGL linearly maps the largest possible integer value to 1.0f (maximum color) and 0 to 0.0f (minimum color).
Therefore, unless your RGB and A values are in the billions, they will likely appear completely black (and fully transparent if blending is enabled). I see that you've got alpha = 1, which becomes essentially zero after conversion to float.
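The simplest fix is to store the colors as floats in the 0.0 to 1.0 range (or as unsigned bytes in 0 to 255) instead of as GL_INT. A sketch of the float variant, assuming myColor's components are already in the 0 to 1 range (the myColors member would become a GLfloat* accordingly):
//allocation: floats instead of ints
myColors = new GLfloat[myNumVertices * 4];
//filling: plain 0.0f..1.0f component values
myColors[colorArrayIndex] = myColor.getX();
myColors[colorArrayIndex + 1] = myColor.getY();
myColors[colorArrayIndex + 2] = myColor.getZ();
myColors[colorArrayIndex + 3] = 1.0f;
//rendering: tell OpenGL the array now holds floats
glColorPointer(4, GL_FLOAT, 0, myColors);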
Well, I think I know what the problem is; I am just having a hard time debugging it. I am working with the DirectX API and I am trying to generate a plane along the x and z axes, following a book I have. The problem is in how I create my indices: I think I am setting values outside the bounds of the indices array, but I am having a hard time figuring out what I did wrong. I am unfamiliar with this method of generating a plane, so it's a little difficult for me. Below is my code; pay particular attention to the indices loop.
[edit]
I've been reviewing it. This is how the indices work:
int curVertex = x + (z * NUM_VERTSX);
This always gets the starting vertex of a cell. So say we have 17 vertices on the x axis and 17 vertices on the z axis, and we are on the first iteration of both the x and z loops:
curVertex = 0 + (0 * 17)
curVertex = 0 + 0 = 0
Say we are on the first iteration of the z loop and the second iteration of the x loop:
curVertex = 1 + (0 * 17)
curVertex = 1 + 0 = 1
indices[curIndex] = curVertex;
indices[curIndex + 1] = curVertex + NUM_VERTSX;
indices[curIndex + 2] = curVertex + 1;
indices[curIndex + 3] = curVertex + 1;
indices[curIndex + 4] = curVertex + NUM_VERTSX;
indices[curIndex + 5] = curVertex + NUM_VERTSX + 1;
If we are on the first iteration,
indices[curIndex] = curVertex;
equals the first vertex, 0.
indices[curIndex + 1] = curVertex + NUM_VERTSX;
gives the vertex on the row below (it is always the vertex directly beneath the starting vertex):
x x x x
[x] x x x
#include "MyGame.h"
//#include "CubeVector.h"
/* This code sets a projection and shows a turning cube. What has been added is the project, rotation and
a rasterizer to change the rasterization of the cube. The issue that was going on was something with the effect file
which was causing the vertices not to be rendered correctly.*/
typedef struct
{
ID3D10Effect* pEffect;
ID3D10EffectTechnique* pTechnique;
//vertex information
ID3D10Buffer* pVertexBuffer;
ID3D10Buffer* pIndicesBuffer;
ID3D10InputLayout* pVertexLayout;
UINT numVertices;
UINT numIndices;
}ModelObject;
ModelObject modelObject;
// World Matrix
D3DXMATRIX WorldMatrix;
// View Matrix
D3DXMATRIX ViewMatrix;
// Projection Matrix
D3DXMATRIX ProjectionMatrix;
ID3D10EffectMatrixVariable* pProjectionMatrixVariable = NULL;
//grid information
#define NUM_COLS 16
#define NUM_ROWS 16
#define CELL_WIDTH 32
#define CELL_HEIGHT 32
#define NUM_VERTSX (NUM_COLS + 1)
#define NUM_VERTSY (NUM_ROWS + 1)
bool MyGame::InitDirect3D()
{
if(!DX3dApp::InitDirect3D())
{
return false;
}
D3D10_RASTERIZER_DESC rastDesc;
rastDesc.FillMode = D3D10_FILL_WIREFRAME;
rastDesc.CullMode = D3D10_CULL_FRONT;
rastDesc.FrontCounterClockwise = true;
rastDesc.DepthBias = false;
rastDesc.DepthBiasClamp = 0;
rastDesc.SlopeScaledDepthBias = 0;
rastDesc.DepthClipEnable = false;
rastDesc.ScissorEnable = false;
rastDesc.MultisampleEnable = false;
rastDesc.AntialiasedLineEnable = false;
ID3D10RasterizerState *g_pRasterizerState;
mpD3DDevice->CreateRasterizerState(&rastDesc, &g_pRasterizerState);
mpD3DDevice->RSSetState(g_pRasterizerState);
// Set up the World Matrix
D3DXMatrixIdentity(&WorldMatrix);
D3DXMatrixLookAtLH(&ViewMatrix, new D3DXVECTOR3(0.0f, 10.0f, -20.0f), new D3DXVECTOR3(0.0f, 0.0f, 0.0f), new D3DXVECTOR3(0.0f, 1.0f, 0.0f));
// Set up the projection matrix
D3DXMatrixPerspectiveFovLH(&ProjectionMatrix, (float)D3DX_PI * 0.5f, (float)mWidth/(float)mHeight, 0.1f, 100.0f);
if(!CreateObject())
{
return false;
}
return true;
}
//These are actions that take place after the clearing of the buffer and before the present
void MyGame::GameDraw()
{
static float rotationAngle = 0.0f;
// create the rotation matrix using the rotation angle
D3DXMatrixRotationY(&WorldMatrix, rotationAngle);
rotationAngle += (float)D3DX_PI * 0.0f;
// Set the input layout
mpD3DDevice->IASetInputLayout(modelObject.pVertexLayout);
// Set vertex buffer
UINT stride = sizeof(VertexPos);
UINT offset = 0;
mpD3DDevice->IASetVertexBuffers(0, 1, &modelObject.pVertexBuffer, &stride, &offset);
mpD3DDevice->IASetIndexBuffer(modelObject.pIndicesBuffer, DXGI_FORMAT_R32_UINT, 0);
// Set primitive topology
mpD3DDevice->IASetPrimitiveTopology(D3D10_PRIMITIVE_TOPOLOGY_TRIANGLELIST);
// Combine and send the final matrix to the shader
D3DXMATRIX finalMatrix = (WorldMatrix * ViewMatrix * ProjectionMatrix);
pProjectionMatrixVariable->SetMatrix((float*)&finalMatrix);
// make sure modelObject is valid
// Render a model object
D3D10_TECHNIQUE_DESC techniqueDescription;
modelObject.pTechnique->GetDesc(&techniqueDescription);
// Loop through the technique passes
for(UINT p=0; p < techniqueDescription.Passes; ++p)
{
modelObject.pTechnique->GetPassByIndex(p)->Apply(0);
// draw the cube using all 36 vertices and 12 triangles
mpD3DDevice->DrawIndexed(modelObject.numIndices,0,0);
}
}
//Render actually incapsulates Gamedraw, so you can call data before you actually clear the buffer or after you
//present data
void MyGame::Render()
{
DX3dApp::Render();
}
bool MyGame::CreateObject()
{
VertexPos vertices[NUM_VERTSX * NUM_VERTSY];
for(int z=0; z < NUM_VERTSY; ++z)
{
for(int x = 0; x < NUM_VERTSX; ++x)
{
vertices[x + z * NUM_VERTSX].pos.x = (float)x * CELL_WIDTH;
vertices[x + z * NUM_VERTSX].pos.z = (float)z * CELL_HEIGHT;
vertices[x + z * NUM_VERTSX].pos.y = 0.0f;
vertices[x + z * NUM_VERTSX].color = D3DXVECTOR4(1.0, 0.0f, 0.0f, 0.0f);
}
}
DWORD indices[NUM_VERTSX * NUM_VERTSY];
int curIndex = 0;
for(int z=0; z < NUM_ROWS; ++z)
{
for(int x = 0; x < NUM_COLS; ++x)
{
int curVertex = x + (z * NUM_VERTSX);
indices[curIndex] = curVertex;
indices[curIndex + 1] = curVertex + NUM_VERTSX;
indices[curIndex + 2] = curVertex + 1;
indices[curIndex + 3] = curVertex + 1;
indices[curIndex + 4] = curVertex + NUM_VERTSX;
indices[curIndex + 5] = curVertex + NUM_VERTSX + 1;
curIndex += 6;
}
}
//Create Layout
D3D10_INPUT_ELEMENT_DESC layout[] = {
{"POSITION",0,DXGI_FORMAT_R32G32B32_FLOAT, 0 , 0, D3D10_INPUT_PER_VERTEX_DATA, 0},
{"COLOR",0,DXGI_FORMAT_R32G32B32A32_FLOAT, 0 , 12, D3D10_INPUT_PER_VERTEX_DATA, 0}
};
UINT numElements = (sizeof(layout)/sizeof(layout[0]));
modelObject.numVertices = sizeof(vertices)/sizeof(VertexPos);
//Create buffer desc
D3D10_BUFFER_DESC bufferDesc;
bufferDesc.Usage = D3D10_USAGE_DEFAULT;
bufferDesc.ByteWidth = sizeof(VertexPos) * modelObject.numVertices;
bufferDesc.BindFlags = D3D10_BIND_VERTEX_BUFFER;
bufferDesc.CPUAccessFlags = 0;
bufferDesc.MiscFlags = 0;
D3D10_SUBRESOURCE_DATA initData;
initData.pSysMem = vertices;
//Create the buffer
HRESULT hr = mpD3DDevice->CreateBuffer(&bufferDesc, &initData, &modelObject.pVertexBuffer);
if(FAILED(hr))
return false;
modelObject.numIndices = sizeof(indices)/sizeof(DWORD);
bufferDesc.ByteWidth = sizeof(DWORD) * modelObject.numIndices;
bufferDesc.BindFlags = D3D10_BIND_INDEX_BUFFER;
initData.pSysMem = indices;
hr = mpD3DDevice->CreateBuffer(&bufferDesc, &initData, &modelObject.pIndicesBuffer);
if(FAILED(hr))
return false;
/////////////////////////////////////////////////////////////////////////////
//Set up fx files
LPCWSTR effectFilename = L"effect.fx";
modelObject.pEffect = NULL;
hr = D3DX10CreateEffectFromFile(effectFilename,
NULL,
NULL,
"fx_4_0",
D3D10_SHADER_ENABLE_STRICTNESS,
0,
mpD3DDevice,
NULL,
NULL,
&modelObject.pEffect,
NULL,
NULL);
if(FAILED(hr))
return false;
pProjectionMatrixVariable = modelObject.pEffect->GetVariableByName("Projection")->AsMatrix();
//Dont sweat the technique. Get it!
LPCSTR effectTechniqueName = "Render";
modelObject.pTechnique = modelObject.pEffect->GetTechniqueByName(effectTechniqueName);
if(modelObject.pTechnique == NULL)
return false;
//Create Vertex layout
D3D10_PASS_DESC passDesc;
modelObject.pTechnique->GetPassByIndex(0)->GetDesc(&passDesc);
hr = mpD3DDevice->CreateInputLayout(layout, numElements,
passDesc.pIAInputSignature,
passDesc.IAInputSignatureSize,
&modelObject.pVertexLayout);
if(FAILED(hr))
return false;
return true;
}
Your indices array contains 6 entries per 'cell' (since you're drawing two triangles for each), therefore it should be declared as
DWORD indices[NUM_ROWS * NUM_COLS * 6]
The error you get tells you that you are writing outside the bounds of indices; this usually hints at either a wrong declaration or a wrong index calculation.
Now let us take the code snippet in question (the probable root cause):
Code
DWORD indices[NUM_VERTSX * NUM_VERTSY];
int curIndex = 0;
for(int z=0; z < NUM_ROWS; ++z)
{
for(int x = 0; x < NUM_COLS; ++x)
{
int curVertex = x + (z * NUM_VERTSX);
indices[curIndex] = curVertex;
indices[curIndex + 1] = curVertex + NUM_VERTSX;
indices[curIndex + 2] = curVertex + 1;
indices[curIndex + 3] = curVertex + 1;
indices[curIndex + 4] = curVertex + NUM_VERTSX;
indices[curIndex + 5] = curVertex + NUM_VERTSX + 1;
curIndex += 6;
}
}
Analysis
Here indices is allocated with NUM_VERTSX * NUM_VERTSY = (16 + 1) * (16 + 1) = 289 entries, so valid subscripts are 0...288. At the boundary condition, z = 15 and x = 15, curIndex has already reached (15 * 16 + 15) * 6 = 1530, and the loop body writes up to indices[curIndex + 5] = indices[1535]. This far exceeds the 289 allocated entries.
Suggestion
Since three values determine the required size of the target array, all three must be part of the allocation. So if you use DWORD indices[NUM_ROWS * NUM_COLS * UNIT_BLOCK], where UNIT_BLOCK = 6, it will work fine (NUM_VERTSX * NUM_VERTSY * UNIT_BLOCK also fits, it just over-allocates a little).
Also, instead of embedding a magic number in the code, you can use a const variable; it will help a great deal later if you ever need to change the value.
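A minimal sketch of the corrected declaration, reusing the loop from the question and the same NUM_ROWS/NUM_COLS/NUM_VERTSX defines:
const int UNIT_BLOCK = 6; //2 triangles * 3 indices per grid cell
DWORD indices[NUM_ROWS * NUM_COLS * UNIT_BLOCK]; //16 * 16 * 6 = 1536 entries
int curIndex = 0;
for(int z = 0; z < NUM_ROWS; ++z)
{
    for(int x = 0; x < NUM_COLS; ++x)
    {
        int curVertex = x + (z * NUM_VERTSX);
        indices[curIndex] = curVertex;
        indices[curIndex + 1] = curVertex + NUM_VERTSX;
        indices[curIndex + 2] = curVertex + 1;
        indices[curIndex + 3] = curVertex + 1;
        indices[curIndex + 4] = curVertex + NUM_VERTSX;
        indices[curIndex + 5] = curVertex + NUM_VERTSX + 1;
        curIndex += 6;
    }
}
With that declaration, modelObject.numIndices = sizeof(indices)/sizeof(DWORD) comes out to 1536 automatically.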
HTH