Auto-generated vertex buffer and index buffer not working - C++

In short, I created a function in C++ to build a vertex buffer array and an index buffer array from a vector of vertices, i.e. if you pass 4 points in the vertex vector, the function should in theory return 2 arrays to be used for the vertex buffer and index buffer. However, this is where the problem arises. After the function returns the arrays, the buffers are initialised and glDrawElements is called, but only one of the two triangles that make up the square is drawn. I am very confused as to why.
Here is the code,
Struct defining the stuff the function returns:
struct ResultDataBuffer {
    std::vector<float> positions;
    std::vector<unsigned int> indices;
    unsigned int psize;
    unsigned int isize;
    unsigned int bpsize;
    unsigned int bisize;
};
//positions and indices are the vectors for the buffers (to be converted to arrays)
//psize and isize are the sizes of the vectors
//bpsize and bisize are the byte size of the vectors (i.e. sizeof())
The function itself:
static ResultDataBuffer CalculateBuffers(std::vector<float> vertixes) {
    std::vector<float> positions = vertixes;
    std::vector<unsigned int> indices;
    int length = vertixes.size();
    int l = length / 2;
    int i = 0;
    while (i < l - 2) { // The logic for the index buffer array. If the number of vertices is l, this array (vector here) should be 0,1,2, 0,2,3, ..., 0,l-2,l-1
        i += 1;
        indices.push_back(0);
        indices.push_back(i + 1);
        indices.push_back(i + 2);
    }
    return { vertixes, indices, positions.size(), indices.size(), sizeof(float) * positions.size(), sizeof(unsigned int) * indices.size() };
}
The code in the main function (definitions of buffers and stuff):
std::vector<float> vertixes = {
    -0.5f, -0.5f,
     0.5f, -0.5f,
     0.5f,  0.5f,
    -0.5f,  0.5f,
};
ResultDataBuffer rdb = CalculateBuffers(vertixes);
float* positions = rdb.positions.data(); //Convert vector into array
unsigned int* indices = rdb.indices.data(); //Ditto above
unsigned int buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, rdb.bpsize, positions, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, 0);
unsigned int ibo;
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, rdb.bisize, indices, GL_STATIC_DRAW);
Stuff in the main function (in game loop):
glDrawElements(GL_TRIANGLES, rdb.isize, GL_UNSIGNED_INT, nullptr);
Apologies for the length of this post, I tried to cut it down.
C++ full code: https://pastebin.com/ZGLSQm3b
Shader code (located in /res/shaders/Basic.shader): https://pastebin.com/C1ahVUD9
So to summarize: instead of drawing a square (two triangles), this code draws only one triangle.

The issue is caused by the loop which generates the array of indices:
while (i < l - 2) {
    i += 1;
    indices.push_back(0);
    indices.push_back(i + 1);
    indices.push_back(i + 2);
}
This loop generates the indices
0, 2, 3, 0, 3, 4
But you have to generate the indices
0, 1, 2, 0, 2, 3
This happens because the control variable is incremented before the indices are appended to the vector.
Increment the control variable at the end of the loop to solve the issue:
while (i < l - 2) {
    indices.push_back(0);
    indices.push_back(i + 1);
    indices.push_back(i + 2);
    i += 1;
}
Or use a for loop:
for (unsigned int i = 0; i < l - 2; ++i)
{
    unsigned int t[]{ 0, i + 1, i + 2 };
    indices.insert(indices.end(), t, t + 3);
}
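Putting it together, the asker's whole function could be rewritten like this (an untested sketch; the casts just quiet narrowing warnings when filling the unsigned int fields of ResultDataBuffer):
static ResultDataBuffer CalculateBuffers(std::vector<float> vertixes) {
    std::vector<float> positions = vertixes;
    std::vector<unsigned int> indices;
    unsigned int l = (unsigned int)(positions.size() / 2); // two floats per vertex
    // Triangle-fan indices for a convex polygon: 0,1,2, 0,2,3, ..., 0,l-2,l-1
    for (unsigned int i = 1; i + 1 < l; ++i) {
        indices.push_back(0);
        indices.push_back(i);
        indices.push_back(i + 1);
    }
    return { positions, indices,
             (unsigned int)positions.size(), (unsigned int)indices.size(),
             (unsigned int)(sizeof(float) * positions.size()),
             (unsigned int)(sizeof(unsigned int) * indices.size()) };
}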

Related

Why is my Index Generation Function not correctly building the triangle primitives?

I am trying to code a function which automatically populates a mesh's index vector container. The function should work without issue in theory as it generates the proper indices in their correct order; however, the triangles do not form! Instead, I am left with a single line.
My mesh generation code is supposed to build an octahedron and then render it in the main game loop. The mesh class is shown below in its entirety:
struct vertex
{
    glm::vec3 position;
    glm::vec3 color;
};

class Mesh
{
public:
    GLuint VAO, VBO, EBO;
    std::vector<vertex> vtx;
    std::vector<glm::vec3> idx;
    glm::mat4 modelMatrix = glm::mat4(1.f);

    Mesh(glm::vec3 position, glm::vec3 scale)
    {
        vertexGen(6);
        idx = indexGen(6);
        modelMatrix = glm::scale(glm::translate(modelMatrix, position), scale);
        initMesh();
    }

    void Render(Shader shaderProgram, Camera camera, bool wireframe)
    {
        glUseProgram(shaderProgram.ID);
        glPatchParameteri(GL_PATCH_VERTICES, 3); // Indicates to the VAO that each group of three vertices is one patch (triangles)
        glProgramUniformMatrix4fv(shaderProgram.ID, 0, 1, GL_FALSE, glm::value_ptr(modelMatrix));
        glProgramUniformMatrix4fv(shaderProgram.ID, 1, 1, GL_FALSE, glm::value_ptr(camera.camMatrix));
        glProgramUniform3fv(shaderProgram.ID, 2, 1, glm::value_ptr(camera.Position));
        glBindVertexArray(VAO); // Binds the VAO to the shader program
        if (wireframe)
        {
            glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
            glDisable(GL_CULL_FACE);
        }
        else
        {
            glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
            //glEnable(GL_CULL_FACE);
        }
        glDrawElements(GL_PATCHES, idx.size(), GL_UNSIGNED_INT, 0); // Tells the shader program how to draw the primitives
    }

private:
    void vertexGen(int n) {
        // Populate the base six vertices
        vtx.push_back(vertex{ glm::vec3( 0.0f,  0.5f,  0.0f), glm::vec3(0.f, 1.f, 0.f) });
        vtx.push_back(vertex{ glm::vec3(-0.5f,  0.0f,  0.0f), glm::vec3(0.f, 1.f, 0.f) });
        vtx.push_back(vertex{ glm::vec3( 0.0f,  0.0f, -0.5f), glm::vec3(0.f, 1.f, 0.f) });
        vtx.push_back(vertex{ glm::vec3( 0.5f,  0.0f,  0.0f), glm::vec3(0.f, 1.f, 0.f) });
        vtx.push_back(vertex{ glm::vec3( 0.0f,  0.0f,  0.5f), glm::vec3(0.f, 1.f, 0.f) });
        vtx.push_back(vertex{ glm::vec3( 0.0f, -0.5f,  0.0f), glm::vec3(0.f, 1.f, 0.f) });
    }

    std::vector<glm::vec3> indexGen(int n) {
        std::vector<glm::vec3> indices;
        // Calculate the indices for the top 4 triangles
        indices.push_back(glm::vec3(0, n - 5, n - 4));
        indices.push_back(glm::vec3(0, n - 4, n - 3));
        indices.push_back(glm::vec3(0, n - 3, n - 2));
        indices.push_back(glm::vec3(0, n - 2, n - 5));
        // Calculate the indices for the bottom 4 triangles
        indices.push_back(glm::vec3(5, n - 5, n - 4));
        indices.push_back(glm::vec3(5, n - 4, n - 3));
        indices.push_back(glm::vec3(5, n - 3, n - 2));
        indices.push_back(glm::vec3(5, n - 2, n - 5));
        return indices;
    }

    void initMesh()
    {
        glCreateVertexArrays(1, &VAO); // Sets the address of the uint VAO as the location of a gl vertex array object
        glCreateBuffers(1, &VBO); // Sets the address of the uint VBO as the location of a gl buffer object
        glCreateBuffers(1, &EBO); // Sets the address of the uint EBO as the location of a gl buffer object
        glNamedBufferData(VBO, vtx.size() * sizeof(vtx[0]), vtx.data(), GL_STATIC_DRAW); // Sets the data of the buffer named VBO
        glNamedBufferData(EBO, idx.size() * sizeof(idx[0]), idx.data(), GL_STATIC_DRAW); // Sets the data of the buffer named EBO
        glEnableVertexArrayAttrib(VAO, 0); // Enables an attribute of the VAO in location 0
        glEnableVertexArrayAttrib(VAO, 1); // Enables an attribute of the VAO in location 1
        glVertexArrayAttribBinding(VAO, 0, 0); // Layout Location of Position Vectors
        glVertexArrayAttribBinding(VAO, 1, 0); // Layout Location of Color Values
        glVertexArrayAttribFormat(VAO, 0, 3, GL_FLOAT, GL_FALSE, 0); // Size and Type of Position Vectors
        glVertexArrayAttribFormat(VAO, 1, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat)); // For the Color Values
        glVertexArrayVertexBuffer(VAO, 0, VBO, 0, 6 * sizeof(GLfloat)); // Sets the VBO to indicate the start, offset, and stride of vertex data in the VAO
        glVertexArrayElementBuffer(VAO, EBO); // Sets the EBO to index the VAO vertex connections
    }
};
I took this problem step by step and did all of the basic math on paper. The index generation function returns the same indices, in the same order, as writing the indices out by hand; yet the hand-written indices render the desired result, whereas the generated ones produce only a single line.
I suspect that the issue lies in my mesh initialization function (initMesh), specifically in the glNamedBufferData or glVertexArrayVertexBuffer, but my knowledge of the functions is very limited. I tried changing the parameter of the glNamedBufferData function to different variations of idx.size()*sizeof(idx[0].x), but that yielded the same results, so I am at a loss. Could someone help me fix this, please?
glm::vec3 is a vector of floats (I think) but you are telling OpenGL to read them as unsigned ints.
Float 0.0 is 0x00000000 (i.e. same as int 0), but float 1.0 is 0x3f800000 (same as int 1065353216). They aren't compatible ways to store numbers. You could try glm::ivec3 which is a vector of ints, but I think most people would use std::vector<int> (or unsigned int) and use 3 entries per triangle.
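For illustration, a quick standalone check of those bit patterns (hypothetical snippet, not part of the original answer):
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    float f = 1.0f;
    std::uint32_t bits;
    std::memcpy(&bits, &f, sizeof bits); // reinterpret the float's bytes as an unsigned int
    std::printf("%.1f -> 0x%08x (%u)\n", f, bits, bits); // prints: 1.0 -> 0x3f800000 (1065353216)
}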
Using ivec3 is probably okay in this case, but using types like ivec3 when I really mean three separate ints isn't always good practice, because the compiler can insert padding in unexpected places. On some platforms, ivec3 could be 3 ints plus an extra 4 bytes of padding, 16 bytes in total, and those extra padding bytes would throw off the layout you're relying on. glDrawElements won't skip over padding after every 3 indices, and there is no way to tell it to do so. Padding is fine for vertex data, since there you can tell OpenGL exactly where the data is.
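A minimal sketch of that change, keeping the class from the question (assumed, untested):
std::vector<GLuint> idx; // replaces std::vector<glm::vec3> idx;

std::vector<GLuint> indexGen(int n) {
    // Same triangles as before, but three flat unsigned ints per triangle
    return {
        0, GLuint(n - 5), GLuint(n - 4),
        0, GLuint(n - 4), GLuint(n - 3),
        0, GLuint(n - 3), GLuint(n - 2),
        0, GLuint(n - 2), GLuint(n - 5),
        5, GLuint(n - 5), GLuint(n - 4),
        5, GLuint(n - 4), GLuint(n - 3),
        5, GLuint(n - 3), GLuint(n - 2),
        5, GLuint(n - 2), GLuint(n - 5)
    };
}
The glNamedBufferData(EBO, ...) call can stay as it is, since idx.size() * sizeof(idx[0]) is still the byte size; as a bonus, glDrawElements(GL_PATCHES, idx.size(), ...) now receives the number of indices (24) rather than the number of vec3s (8).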

Troubles with laying out memory for use by OpenGL (glBufferSubData help needed)

So today I wanted to learn how I can use a Vector2 & Color class to send data to OpenGL with pretty clean looking syntax.
The plan is this.
/*
    Vector2 (8 bytes)
    Color (16 bytes)
    Vector2 + Color = 24 bytes

    How can memory be laid out like this?
    {Vector2, Color},
    {Vector2, Color},
    {Vector2, Color}
*/
So I have my two arrays of data.
Vector2 vertices[] = {
    Vector2(0.0f, 0.5f),
    Vector2(0.5f, -0.5f),
    Vector2(-0.5f, -0.5f)
};
// colors are mapped to their respective indices in the vertices array (i.e. colors[0] is mapped to vertices[0])
Color colors[] = {
    Color(1.0f, 0.3f, 0.3f),
    Color(0.3f, 1.0f, 0.3f),
    Color(0.3f, 0.3f, 1.0f)
};
I was able to get a regular triangle rendering at the correct points,
but sending over the color data has been pretty difficult for me to pull off.
The result I currently get is this.
Here's a snippet of the code.
// positions on screen
Vector2 vertices[] = {
    Vector2(0.0f, 0.5f),
    Vector2(0.5f, -0.5f),
    Vector2(-0.5f, -0.5f)
};
// colors for each position
Color colors[] = {
    Color(1.0f, 0.3f, 0.3f),
    Color(0.3f, 1.0f, 0.3f),
    Color(0.3f, 0.3f, 1.0f)
};
// create vertex and array buffers (each bound automatically)
unsigned int vertexArray = createVertexArray();
unsigned int vertexBuffer = createBuffer(GL_ARRAY_BUFFER);
// allocate the data
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices) + sizeof(colors), nullptr, GL_STATIC_DRAW);
// fill up allocated memory
for (int i = 0; i < 3; ++i) {
    glBufferSubData(GL_ARRAY_BUFFER, sizeof(Vector2) * i, sizeof(Vector2), &vertices[i]);
    glBufferSubData(GL_ARRAY_BUFFER, (sizeof(Vector2) + sizeof(Color)) * (i + 1), sizeof(Color), &colors[i]);
}
// set up vertex attributes
glVertexAttribPointer(0, 2, GL_FLOAT, false, sizeof(Vector2) + sizeof(Color), nullptr);
glVertexAttribPointer(1, 3, GL_FLOAT, false, sizeof(Vector2) + sizeof(Color), (const void*)(sizeof(Vector2)));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
You need to add 1 vertex and 1 color alternately to the buffer:
for (int i = 0; i < 3; ++i)
{
    GLintptr offsetV = i * (sizeof(Vector2) + sizeof(Color));
    glBufferSubData(GL_ARRAY_BUFFER, offsetV, sizeof(Vector2), &vertices[i]);
    GLintptr offsetC = offsetV + sizeof(Vector2);
    glBufferSubData(GL_ARRAY_BUFFER, offsetC, sizeof(Color), &colors[i]);
}
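Another option, instead of interleaving with repeated glBufferSubData calls, is to interleave on the CPU first and upload once. A minimal sketch, assuming Vector2 and Color are plain structs of floats (so the 8-byte and 16-byte members sit back to back with no padding):
struct InterleavedVertex {
    Vector2 position; // 8 bytes
    Color color;      // 16 bytes
};

InterleavedVertex interleaved[3];
for (int i = 0; i < 3; ++i) {
    interleaved[i].position = vertices[i];
    interleaved[i].color    = colors[i];
}
// one upload; the layout already matches the attribute pointers above
glBufferData(GL_ARRAY_BUFFER, sizeof(interleaved), interleaved, GL_STATIC_DRAW);
With this layout, sizeof(InterleavedVertex) can also serve as the stride passed to glVertexAttribPointer.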

VBO Generate Verts for Chunks (Voxel Engine, Cpp)

I'm working on a Voxel Engine and I'm using VBO.
My problem is that I don't know how to generate the vertices.
I need to generate vertices for a chunk, i.e. create a chunk of cubes (with one VBO) from X,Y,Z coords.
How can I do this?
First you will need to create/load the data for the voxels. Something like the following could be used to generate a 10x10x10 field of binary voxels:
Note: All code is untested and probably not optimal.
class Field {
    bool data[10*10*10];
public:
    bool& at(int i, int j, int k) { return data[i*10*10 + j*10 + k]; }
};

Field aField;
for (int i = 0; i < 10; ++i) {
    for (int j = 0; j < 10; ++j) {
        for (int k = 0; k < 10; ++k) {
            aField.at(i, j, k) = !(rand() % 2);
        }
    }
}
I'm assuming you are after Minecraft style voxels, otherwise you probably want to use something like Marching Cubes to process the binary voxel field and pass the result to the vertex buffer code.
The next step is, for each occupied voxel in the field, to add the faces of the associated cube. For this we need to create a list of all vertices and (in this case) triangle indices.
std::vector< std::array<GLfloat, 3> > vecVerts;
std::vector< std::array<GLuint, 3> > vecTris;
const GLfloat dWidth = 1.0f; // voxel edge length
for (int i = 0; i < 10; ++i) {
    for (int j = 0; j < 10; ++j) {
        for (int k = 0; k < 10; ++k) {
            if (!aField.at(i, j, k)) continue;
            // Remember where this cube's vertices start, so its
            // triangle indices can be offset accordingly
            const GLuint base = (GLuint)vecVerts.size();
            // Add vertices for the eight corners of the cube
            vecVerts.push_back({ dWidth*(i  ), dWidth*(j  ), dWidth*(k  ) });
            vecVerts.push_back({ dWidth*(i+1), dWidth*(j  ), dWidth*(k  ) });
            vecVerts.push_back({ dWidth*(i  ), dWidth*(j+1), dWidth*(k  ) });
            vecVerts.push_back({ dWidth*(i+1), dWidth*(j+1), dWidth*(k  ) });
            vecVerts.push_back({ dWidth*(i  ), dWidth*(j  ), dWidth*(k+1) });
            vecVerts.push_back({ dWidth*(i+1), dWidth*(j  ), dWidth*(k+1) });
            vecVerts.push_back({ dWidth*(i  ), dWidth*(j+1), dWidth*(k+1) });
            vecVerts.push_back({ dWidth*(i+1), dWidth*(j+1), dWidth*(k+1) });
            // Add indices for each triangle, relative to this cube's base
            vecTris.push_back({ base+0, base+1, base+3 }); vecTris.push_back({ base+0, base+2, base+3 });
            vecTris.push_back({ base+4, base+5, base+7 }); vecTris.push_back({ base+4, base+6, base+7 });
            /* todo: add remaining triangles, should be 8 more */
        }
    }
}
The above code will create two arrays in memory storing your vertex locations and the indices for the triangles. You'll notice that the code always emits all 12 triangles for each voxel; it would be better to check whether there is an adjacent occupied voxel and remove the faces between such voxels, but I'll leave that to you. I also recommend you consider using glm::vec3 for storing vertex data rather than just an array.
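A sketch of that adjacency test, building on the Field class above (out-of-range neighbours are treated as empty so the faces on the boundary of the field are kept):
// True if the voxel at (i, j, k) is occupied; coordinates outside
// the 10x10x10 field count as empty, so boundary faces get drawn.
bool occupied(Field& f, int i, int j, int k) {
    if (i < 0 || i >= 10 || j < 0 || j >= 10 || k < 0 || k >= 10)
        return false;
    return f.at(i, j, k);
}

// Inside the generation loop: only emit the -X face of the cube if
// the voxel to its left is empty, and similarly for the other five faces.
if (!occupied(aField, i - 1, j, k)) {
    // push the two triangles of the -X face here
}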
We can now pass our arrays to OpenGL:
GLuint unVertexArray;
glGenVertexArrays(1, &unVertexArray);
glBindVertexArray(unVertexArray);

GLuint unVertexBuffer;
glGenBuffers(1, &unVertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, unVertexBuffer);
glBufferData(GL_ARRAY_BUFFER, 3*vecVerts.size()*sizeof(GLfloat),
             &vecVerts[0][0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);

GLuint unIndicesBuffer;
glGenBuffers(1, &unIndicesBuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, unIndicesBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 3*vecTris.size()*sizeof(GLuint),
             &vecTris[0][0], GL_STATIC_DRAW);

glBindVertexArray(0);
Then finally, to draw our arrays:
glBindVertexArray(unVertexArray);
glDrawElements(GL_TRIANGLES, 3*vecTris.size(), GL_UNSIGNED_INT, NULL);
glBindVertexArray(0);
I might (am almost certain I) have forgotten a few things here, but this should give you a general outline of what you will need to do. For a proper explanation of most of the gl calls used above there are many online references, such as http://arcsynthesis.org/gltut/index.html

Hard time understanding indices with glDrawElements

I'm trying to draw a terrain with GL_TRIANGLE_STRIP and glDrawElements but I'm having a really hard time understanding the indices thing behind glDrawElements...
Here's what I have so far:
void Terrain::GenerateVertexBufferObjects(float ox, float oy, float oz) {
    float startWidth, startLength, *vArray;
    int vCount, vIndex = -1;

    // width = length = 256
    startWidth = (width / 2.0f) - width;
    startLength = (length / 2.0f) - length;

    vCount = 3 * width * length;
    vArray = new float[vCount];

    for(int z = 0; z < length; z++) {
        // vIndex == vIndex + width * 3 || width * 3 = 256 * 3 = 768
        for(int x = 0; x < width; x++) {
            vArray[++vIndex] = ox + startWidth + (x * stepWidth);
            vArray[++vIndex] = oy + heights[z][x];
            vArray[++vIndex] = oz + startLength + (z * stepLength);
        }
    }

    glGenBuffers(1, &vertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(float) * vCount, vArray, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Terrain::DrawVBO(unsigned int texID, float ox, float oy, float oz) {
    float terrainLight[] = { 1.0f, 1.0f, 1.0f, 1.0f };

    if(!generatedVBOs) {
        GenerateVertexBufferObjects(ox, oy, oz);
        generatedVBOs = true;
    }

    unsigned int indices[] = { 0, 768, 3, 771 };

    glGenBuffers(1, &indexBuffer);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * 4, indices, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

    glEnableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexPointer(3, GL_FLOAT, 0, 0);

    glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);
    glMaterialfv(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE, terrainLight);
    glDrawElements(GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_INT, 0);

    glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);

    glDisableClientState(GL_VERTEX_ARRAY);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
I believe my vArray is correct; I use the same values when drawing with glBegin(GL_TRIANGLE_STRIP)/glEnd(), which works just fine.
My guess was to use just the index of the x coordinate for each vertex. But I have no idea if that's the right way to use indices with glDrawElements.
0: Index of the x coordinate from the first vertex of the triangle. Location: (-128, -128).
768: Index of the x coordinate from the second vertex of the triangle. Location: (-128, -127)
3: Index of the x coordinate from the third vertex of the triangle. Location: (-127, -128)
771: Index of the x coordinate from the fourth vertex, which will draw a second triangle. Location: (-127, -127).
I think everything is making sense so far?
What's not working is that the location values above (which I double-checked on vArray, and they are correct) are not the ones glDrawElements ends up using. Two triangles are drawn, but they are a lot bigger than they should be. It starts correctly at (-128, -128) but goes to something like (-125, -125) instead of (-127, -127).
I can't understand what I'm doing wrong here...
Using something like the following solves my problem:
unsigned int indices[] = { 0, 256, 1, 257 };
I think it's safe to assume that each index refers to a whole vertex, starting at its x coordinate with y and z following, and that we shouldn't multiply by 3 ourselves; the server does that for us.
And now that I think about it, glDrawElements has the word element in it: in this case an element is a vertex with 3 coordinates, as specified in glVertexPointer, and the indices we pass refer to elements, not to individual coordinates.
I feel so dumb now...
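To make the index arithmetic concrete, here is a small sketch (hypothetical helper, not from the original post) of how grid coordinates map to element indices for a width x length vertex grid:
// Each vertex occupies one element slot; OpenGL multiplies by the
// 3-float vertex size internally, so grid point (x, z) has index:
unsigned int vertexIndex(int x, int z, int width) {
    return (unsigned int)(z * width + x);
}

// The working strip above, for width == 256:
// vertexIndex(0, 0, 256) == 0, vertexIndex(0, 1, 256) == 256,
// vertexIndex(1, 0, 256) == 1, vertexIndex(1, 1, 256) == 257.
unsigned int indices[] = { 0, 256, 1, 257 };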

OpenGL 3.x: Access violation when using vertex buffer object and glDrawElements(...)

I have trouble rendering some geometry using a vertex buffer object. I intend to draw a plane of points, basically one vertex at every discrete position in my space. However, I cannot render the plane: every time I call glDrawElements(...), the application crashes with an access violation. There must be some mistake in the initialization, I guess.
This is what I have so far:
#define SPACE_X 512
#define SPACE_Z 512

typedef struct {
    GLfloat x, y, z;    // position
    GLfloat nx, ny, nz; // normals
    GLfloat r, g, b, a; // colors
} Vertex;

typedef struct {
    GLuint i; // index
} Index;

// create vertex buffer
GLuint vertexBufferObject;
glGenBuffers(1, &vertexBufferObject);

// create index buffer
GLuint indexBufferObject;
glGenBuffers(1, &indexBufferObject);

// determine number of vertices / primitives
const int numberOfVertices = SPACE_X * SPACE_Z;
const int numberOfPrimitives = numberOfVertices; // As I'm going to render GL_POINTS, the number of primitives is the same as the number of vertices

// create vertex array
Vertex* vertexArray = new Vertex[numberOfVertices];

// create index array
Index* indexArray = new Index[numberOfPrimitives];

// create planes (vertex array)
// color of the vertices is red for now
int index = -1;
for(GLfloat x = -SPACE_X / 2; x < SPACE_X / 2; x++) {
    for(GLfloat z = -SPACE_Z / 2; z < SPACE_Z / 2; z++) {
        index++;
        vertexArray[index].x = x;
        vertexArray[index].y = 0.0f;
        vertexArray[index].z = z;
        vertexArray[index].nx = 0.0f;
        vertexArray[index].ny = 0.0f;
        vertexArray[index].nz = 1.0f;
        vertexArray[index].r = 1.0;
        vertexArray[index].g = 0.0;
        vertexArray[index].b = 0.0;
        vertexArray[index].a = 1.0;
    }
}

// bind vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
// buffer vertex array
glBufferData(GL_ARRAY_BUFFER, numberOfVertices * sizeof(Vertex), vertexArray, GL_DTREAM_DRAW);
// bind vertex buffer again
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);

// enable attrib index 0 (positions)
glEnableVertexAttribArray(0);
// pass positions in
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), vertexArray);
// enable attribute index 1 (normals)
glEnableVertexAttribArray(1);
// pass normals in
glVertexAttribPointer((GLuint)1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), &vertexArray[0].nx);
// enable attribute index 2 (colors)
glEnableVertexAttribArray(2);
// pass colors in
glVertexAttribPointer((GLuint)2, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), &vertexArray[0].r);

// create index array
for(GLuint i = 0; i < numberOfPrimitives; i++) {
    indexArray[i].i = i;
}

// bind buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);
// buffer indices
glBufferData(GL_ELEMENT_ARRAY_BUFFER, numberOfPrimitives * sizeof(Index), indexArray, GL_STREAM_DRAW);
// bind buffer again
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);

// AND HERE IT CRASHES!
// draw plane of GL_POINTS
glDrawElements(GL_POINTS, numberOfPrimitives, GL_UNSIGNED_INT, indexArray);

// bind default buffers
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);

// delete vertex / index buffers
glDeleteBuffers(1, &vertexBufferObject);
glDeleteBuffers(1, &indexBufferObject);

delete[] vertexArray;
vertexArray = NULL;
delete[] indexArray;
indexArray = NULL;
When you are using buffer objects, the last parameter of the gl*Pointer calls and the 4th parameter of glDrawElements are no longer addresses in main memory (yours still are!), but offsets into the buffer objects. Make sure to compute these offsets in bytes! The "offsetof" macro is very helpful there.
Look at the second example on this page and compare it to what you did: http://www.opengl.org/wiki/VBO_-_just_examples
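Applied to the question's Vertex struct, the attribute setup could look like this (a sketch, untested):
#include <cstddef> // offsetof

// With a buffer bound, the final argument is a byte offset into the
// buffer object, not a pointer into your own arrays.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const void*)offsetof(Vertex, x));
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const void*)offsetof(Vertex, nx));
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const void*)offsetof(Vertex, r));

// Likewise, draw from the start of the bound element array buffer:
glDrawElements(GL_POINTS, numberOfPrimitives, GL_UNSIGNED_INT, (const void*)0);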
And you have one typo: GL_DTREAM_DRAW.
The function glEnableClientState(...) is deprecated! Sorry, for some reason I had overlooked that fact.