Mixed types in VBO? - opengl

Let's say I want to upload unsigned integer and float data to the graphics card, in a single draw call. I use standard VBOs (not VAOs, I'm using OpenGL 2.0), with the various vertex attribute arrays combined into a single GL_ARRAY_BUFFER and pointed to individually using glVertexAttribPointer(...), so:
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferId);
glEnableVertexAttribArray(positionAttributeId);
glEnableVertexAttribArray(myIntAttributeId);
glVertexAttribPointer(positionAttributeId, 4, GL_FLOAT, false, 0, 0);
glVertexAttribPointer(myIntAttributeId, 4, GL_UNSIGNED_INT, false, 0, 128);
glClear(...);
glDraw*(...);
The problem I have here is that my buffer (ref'ed by vertexBufferId) has to be created as a FloatBuffer in LWJGL so that it can hold the GL_FLOAT attribute, and this would seem to preclude the use of GL_UNSIGNED_INT here (or else the other way around - it's either one or the other, since the buffer cannot be of two types).
Any ideas? How would this be handled in native C code?

This would be handled in C (in a safe way) by doing this:
GLfloat *positions = malloc(sizeof(GLfloat) * 4 * numVertices);
GLuint *colors = malloc(sizeof(GLuint) * 4 * numVertices);
//Fill in data here.
//Allocate buffer memory
glBufferData(..., (sizeof(GLfloat) + sizeof(GLuint)) * 4 * numVertices, NULL, ...);
//Upload arrays
glBufferSubData(..., 0, sizeof(GLfloat) * 4 * numVertices, positions);
glBufferSubData(..., sizeof(GLfloat) * 4 * numVertices, sizeof(GLuint) * 4 * numVertices, colors);
free(positions);
free(colors);
There are other ways of doing this as well, which involve a lot of casting and so forth. But this code emulates what you'll have to do in LWJGL.
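For completeness, the "casting" variant could look roughly like this (a sketch, not the question's code; numVertices and the pointer names are made up): build one byte block on the CPU side and upload it with a single glBufferData call.
/* Sketch: pack the float positions and the unsigned int attribute into one
   byte block, then upload it in a single call (buffer assumed already bound). */
size_t posBytes   = sizeof(GLfloat) * 4 * numVertices;
size_t colorBytes = sizeof(GLuint)  * 4 * numVertices;
unsigned char *block = (unsigned char *)malloc(posBytes + colorBytes);

GLfloat *posDst = (GLfloat *)block;              /* floats live first    */
GLuint  *intDst = (GLuint  *)(block + posBytes); /* unsigned ints follow */
/* ... fill posDst[] and intDst[] here ... */

glBufferData(GL_ARRAY_BUFFER, posBytes + colorBytes, block, GL_STATIC_DRAW);
free(block);
In LWJGL the rough analogue is a single ByteBuffer, written through views obtained with asFloatBuffer()/asIntBuffer() at the appropriate byte positions, then handed to glBufferData once.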

Related

Why is OpenGL immediate mode faster than core?

I am using the following library to render text in OpenGL: fontstash. I have another header file which adds support for OpenGL 3.0+. The question is: why is the core-profile render implementation so much slower than the immediate-mode one?
Here is the render code with immediate mode:
static void glfons__renderDraw(void* userPtr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts)
{
GLFONScontext* gl = (GLFONScontext*)userPtr;
if (gl->tex == 0) return;
glBindTexture(GL_TEXTURE_2D, gl->tex);
glEnable(GL_TEXTURE_2D);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(float)*2, verts);
glTexCoordPointer(2, GL_FLOAT, sizeof(float)*2, tcoords);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(unsigned int), colors);
glDrawArrays(GL_TRIANGLES, 0, nverts);
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
}
Here is the core profile render code:
static void gl3fons__renderDraw(void* userPtr, const float* verts, const float* tcoords, const unsigned int* colors, int nverts)
{
GLFONScontext* gl = (GLFONScontext*)userPtr;
if (gl->tex == 0) return;
if (gl->shader == 0) return;
if (gl->vao == 0) return;
if (gl->vbo == 0) return;
// init shader
glUseProgram(gl->shader);
// init texture
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, gl->tex);
glUniform1i(gl->texture_uniform, 0);
// init our projection matrix
glUniformMatrix4fv(gl->projMat_uniform, 1, false, gl->projMat);
// bind our vao
glBindVertexArray(gl->vao);
// setup our buffer
glBindBuffer(GL_ARRAY_BUFFER, gl->vbo);
glBufferData(GL_ARRAY_BUFFER, (2 * sizeof(float) * 2 * nverts) + (sizeof(int) * nverts), NULL, GL_DYNAMIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float) * 2 * nverts, verts);
glBufferSubData(GL_ARRAY_BUFFER, sizeof(float) * 2 * nverts, sizeof(float) * 2 * nverts, tcoords);
glBufferSubData(GL_ARRAY_BUFFER, 2 * sizeof(float) * 2 * nverts, sizeof(int) * nverts, colors);
// setup our attributes
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, (void *) (sizeof(float) * 2 * nverts));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(int), (void *) (2 * sizeof(float) * 2 * nverts));
glDrawArrays(GL_TRIANGLES, 0, nverts);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glUseProgram(0);
}
I made a small test for each implementation and the results show that immediate mode is significantly faster than core.
Both tests fill the screen with AAA... and I log the time it took to do this for each frame. This is the loop:
// Create GL stash for 512x512 texture, our coordinate system has zero at top-left.
struct FONScontext* fs = glfonsCreate(512, 512, FONS_ZERO_TOPLEFT);
// Add font to stash.
int fontNormal = fonsAddFont(fs, "sans", "fontstash/example/DroidSerif-Regular.ttf");
// Render some text
float dx = 10, dy = 10;
unsigned int white = glfonsRGBA(255,255,255,255);
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
fonsSetFont(fs, fontNormal);
fonsSetSize(fs, 20.0f);
fonsSetColor(fs, white);
for(int i = 0; i < 90; i++){
for( int j = 0; j < 190; j++){
dx += 10;
fonsDrawText(fs, dx, dy, "A", NULL);
}
dy += 10;
dx = 10;
}
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> time_span = t2 - t1;
std::cout<<"Time to render: "<<time_span.count()<<"ms"<<std::endl;
And the results show more than 400ms difference between the two:
Screenshot: Core profile (left) vs Immediate mode (right)
What should be changed in order to speed up performance?
I don't know exactly what gl is in this program, but it's pretty clear that, every time you want to render a piece of text, you perform the following operations:
Allocate storage for a buffer, reallocating whatever storage had been created from the last time this was called.
Perform three separate uploads of data to that buffer.
These are not good ways to stream vertex data to the GPU. There are specific techniques for doing this well, but this is not one of them. In particular, the fact that you are constantly reallocating the same buffer is going to kill performance.
The most effective way to deal with this is to have a single buffer with a fixed amount of storage. It gets allocated exactly once and never again. Ideally, whatever API you're getting vertex data from would provide it in an interleaved format, so that you would only need to perform one upload rather than three. But apparently, Fontstash is not so generous.
In any case, the main idea is to avoid reallocation and synchronization. The latter means never trying to write over data that has been written to recently. So your buffer needs to be sufficiently large to hold twice the number of font vertices you ever expect to render. Essentially, you double-buffer the vertex data: writing to one set of data while the other set is being read from.
So at the beginning of the frame, you figure out what the byte offset to where you want to render will be. This will either be the start of the buffer or half-way through it. Then, for each blob of text, you write vertex data to this offset and increment the offset accordingly.
And to avoid having to change VAO state, you should interleave the vertex data manually. Instead of uploading three arrays, you should interleave the vertices so that you're effectively making one gigantic array of vertices. So you never need to call glVertexAttribPointer in the middle of this function; you just use the parameters to glDraw* to draw the part of the array you want.
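As a rough illustration (the struct layout below is an assumption, not fontstash's own format), the manual interleaving could look like this:
// Sketch only: interleave position, texcoord and packed RGBA color per vertex.
struct FonsVertex {
    float        x, y;   // position
    float        u, v;   // texture coordinate
    unsigned int rgba;   // packed color
};

static void interleave(FonsVertex* dst, const float* verts, const float* tcoords,
                       const unsigned int* colors, int nverts)
{
    for (int i = 0; i < nverts; ++i) {
        dst[i].x    = verts[i * 2 + 0];
        dst[i].y    = verts[i * 2 + 1];
        dst[i].u    = tcoords[i * 2 + 0];
        dst[i].v    = tcoords[i * 2 + 1];
        dst[i].rgba = colors[i];
    }
}
With this layout, the three attribute pointers can be set once (stride sizeof(FonsVertex), with GL_UNSIGNED_BYTE/GL_TRUE for the color) and never touched again inside the render function.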
This also means you only need one glBufferSubData call. But if you have access to persistent mapped buffers, you don't even need that, since you can just write to the memory directly while using the other portion of it. Though if you use persistent mapping, you will need to use a fence sync object when you switch buffer regions to make sure that you're not writing to vertex data that is still being read by the GPU.
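A minimal sketch of the double-buffered streaming path, building on the interleaved layout above (MAX_VERTS and the helper names are made up; the buffer is assumed to have been allocated once at init with glBufferData(GL_ARRAY_BUFFER, 2 * MAX_VERTS * sizeof(FonsVertex), NULL, GL_DYNAMIC_DRAW)):
// Sketch: write into one half of the pre-allocated buffer per frame,
// while the GPU may still be reading the other half.
static const int MAX_VERTS  = 100000;
static int       gRegion    = 0;  // which half this frame writes to
static int       gUsedVerts = 0;  // vertices written into that half so far

// Call once per frame, before any text is drawn.
static void beginTextFrame()
{
    gRegion    = 1 - gRegion;
    gUsedVerts = 0;
}

// Call per text blob, with already-interleaved vertex data.
static void drawTextBlob(GLuint vbo, const FonsVertex* verts, int nverts)
{
    int base = gRegion * MAX_VERTS + gUsedVerts;
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferSubData(GL_ARRAY_BUFFER, base * sizeof(FonsVertex),
                    nverts * sizeof(FonsVertex), verts);
    glDrawArrays(GL_TRIANGLES, base, nverts);
    gUsedVerts += nverts;
}
With persistent mapped buffers (GL 4.4 / ARB_buffer_storage) the glBufferSubData call would be replaced by writing straight into the mapped pointer, plus a glFenceSync/glClientWaitSync pair when switching halves.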

How come no cube is drawn on my screen with this code in a GLFW window?

I have a bunch of code (copied from various tutorials) that is supposed to draw a random color-changing cube that the camera shifts around every second or so (with a variable, not using timers yet). It worked in the past, back when it was all shoved into my main function, before I moved the code into distinct classes; but now I can't see anything on the main window other than a blank background. I cannot pinpoint any particular issue here as I am getting no errors or exceptions, and my own personally defined code checks out; when I debugged, every variable had a value I expected, and the shaders I used (in string form) worked in the past before I re-organized my code. I can print out the vertices of the cube in the same scope as the glDrawArrays() function as well, and they have the correct values too. Basically, I have no idea what's wrong with my code that is causing nothing to be drawn.
My best guess is that I called - or forgot to call - some OpenGL function improperly, or with the wrong data, in one of the three methods of my Model class. In my program, I create a Model object (after GLFW and glad are initialized, which then calls the Model constructor), update it every once in a while (time doesn't matter) through the update() function, then draw it to my screen every time my main loop runs, through the draw() function.
Possible locations of code faults:
Model::Model(std::vector<GLfloat> vertexBufferData, std::vector<GLfloat> colorBufferData) {
mVertexBufferData = vertexBufferData;
mColorBufferData = colorBufferData;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &VBO);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, VBO);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(mVertexBufferData), &mVertexBufferData.front(), GL_STATIC_DRAW);
glGenBuffers(1, &CBO);
glBindBuffer(GL_ARRAY_BUFFER, CBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(mColorBufferData), &mColorBufferData.front(), GL_STATIC_DRAW);
// Create and compile our GLSL program from the shaders
programID = loadShaders(zachos::DATA_DEF);
glUseProgram(programID);
}
void Model::update() {
for (int v = 0; v < 12 * 3; v++) {
mColorBufferData[3 * v + 0] = (float)std::rand() / RAND_MAX;
mColorBufferData[3 * v + 1] = (float)std::rand() / RAND_MAX;
mColorBufferData[3 * v + 2] = (float)std::rand() / RAND_MAX;
}
glBufferData(GL_ARRAY_BUFFER, sizeof(mColorBufferData), &mColorBufferData.front(), GL_STATIC_DRAW);
}
void Model::draw() {
// Setup some 3D stuff
glm::mat4 mvp = Mainframe::projection * Mainframe::view * model;
GLuint MatrixID = glGetUniformLocation(programID, "MVP");
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &mvp[0][0]);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, CBO);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the array
glDrawArrays(GL_TRIANGLES, 0, mVertexBufferData.size() / 3);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
};
My question is simple: how come my program won't draw a cube on my screen? Is the issue within these three functions or elsewhere? I can provide more general information about the drawing process if needed, though I believe the code I provided is enough, since I literally just call model.draw().
sizeof(std::vector) will usually just be 24 bytes (since the struct typically contains 3 pointers). So basically both of your buffers have 6 floats loaded in them, which is not enough verts for a single triangle, let alone a cube!
You should instead be calling size() on the vector when loading the data into the vertex buffers.
glBufferData(GL_ARRAY_BUFFER,
mVertexBufferData.size() * sizeof(float), ///< this!
mVertexBufferData.data(), ///< prefer calling data() here!
GL_STATIC_DRAW);
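The color buffer needs the same treatment (a sketch; it also makes sure the intended buffer is bound before uploading):
// Same fix for the color data; bind the buffer you intend to fill first.
glBindBuffer(GL_ARRAY_BUFFER, CBO);
glBufferData(GL_ARRAY_BUFFER,
             mColorBufferData.size() * sizeof(float),
             mColorBufferData.data(),
             GL_STATIC_DRAW);
The same applies to the glBufferData call in Model::update(), which re-uploads mColorBufferData.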

Access violation error when calling glDrawArrays

I'm writing and OpenGL application where I have a GrassPatch class that represents patches of grass in the scene. I don't want to provide any unnecessary details, so the GrassPatch.cpp looks roughly like this:
GrassPatch::GrassPatch(GLuint density)
{
m_density = density;
generateVertices();
}
void GrassPatch::generateVertices()
{
const int quadVertexCount = 64;
GLfloat bladeWidth, bladeHeight, r;
GLfloat randomX, randomZ;
m_vertices = new GLfloat[quadVertexCount * m_density];
srand(time(NULL));
for (int i = 0; i < m_density; i++)
{
// generate 64 float values and put them into their respective indices in m_vertices
}
glGenBuffers(1, &m_VBO);
glGenVertexArrays(1, &m_VAO);
glBindVertexArray(m_VAO);
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * m_density * quadVertexCount, m_vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)(5 * sizeof(GLfloat)));
glEnableVertexAttribArray(2);
glVertexAttribPointer(3, 8, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)(8 * sizeof(GLfloat)));
glEnableVertexAttribArray(3);
glBindVertexArray(0);
}
void GrassPatch::draw()
{
glBindVertexArray(m_VAO);
glPatchParameteri(GL_PATCH_VERTICES, 4);
glDrawArrays(GL_PATCHES, 0, 4 * m_density);
glBindVertexArray(0);
}
In short, the vertex array object (VAO) for each grass patch is generated inside generateVertices(). My data is tightly packed and the attributes for each vertex are at indices 0, 3, 5, 8, where each vertex is composed of 16 floats. Each grass blade consists of 4 vertices, hence quadVertexCount is set to 64. The vertex shader I use is pretty straightforward and looks like this:
#version 440 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 texCoord;
layout (location = 2) in vec3 centerPos;
layout (location = 3) in float randomValues[8];
out vec2 TexCoord_CS;
void main()
{
TexCoord_CS = texCoord;
gl_Position = vec4(position, 1.0f);
}
The problem here is, when I try to draw each grass blade using the draw() method, I get an access violation error. However, if I slightly change the attribute indices to 0, 4, 8, 12 and make the necessary variable type changes in the vertex shader, the problem disappears and everything renders fine.
What am I missing here, what would cause a problem like this? I've spent hours on the Internet, trying to find the reason but couldn't come up with anything yet. I'm working with Visual Studio 2015 Community Edition. The graphics card I use is NVIDIA GTX 770 and all drivers are up to date.
This is not a valid call:
glVertexAttribPointer(3, 8, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)(8 * sizeof(GLfloat)));
The second argument (size) needs to be 1, 2, 3, or 4. If you call glGetError(), you should see a GL_INVALID_VALUE error code from this call.
Vertex attributes can only have up to 4 components, matching a vec4 type in the shader code. If you need 8 values for an attribute, you'll have to split it into 2 attributes of 4 values each, or use uniforms instead of attributes.
layout (location = 3) in float randomValues[8];
This is not a single input value. This is an array of input values. While this is perfectly legal, it does change what this means.
In particular, it means that this input array is filled in by eight separate attributes. Yes, each one of those floats is a separate attribute, from the OpenGL side. They are assigned locations sequentially, starting with the location you specified. So the input randomValues[4] comes from attribute location 7 (3 + 4).
So your attempt to provide 8 values with one glVertexAttribPointer call will not work. Well, it was never going to work, since the number of components per attribute must be in the range [1, 4]. But it double-doesn't work, since you're not filling in the other 7.
If you want to pass these 8 elements as 8 attributes like this, you therefore need eight independent calls to glVertexAttribPointer:
for(int ix = 0; ix < 8; ++ix)
glVertexAttribPointer(3 + ix, 1, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)((8 + ix) * sizeof(GLfloat)));
But quite frankly, you shouldn't do that. Instead of passing 8 independent attributes, you should pass 2 vec4's:
layout (location = 3) in vec4 randomValues[2];
That way, you only need 2 attributes in your OpenGL code:
glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)(8 * sizeof(GLfloat)));
glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, 16 * sizeof(GLfloat), (GLvoid*)(12 * sizeof(GLfloat)));
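Note that both of those attribute locations also have to be enabled; the original code only enables location 3:
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);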

Use of glDrawElements I Don't Understand

I am studying the source code of an open source project, and they have a use of the function glDrawElements which I don't understand. While I am a programmer, I am quite new to the GL API, so I would appreciate it if someone could tell me how this works.
Let's start with the drawing part. The code looks like this:
for (int i = 0; i < numObjs; i++) {
glDrawElements(GL_TRIANGLES, vboIndexSize(i), GL_UNSIGNED_INT, (void*)(UPTR)vboIndexOffset(i));
}
vboIndexSize(i) returns the number of indices for the current object, and vboIndexOffset(i) returns the offset in bytes into a flat memory array in which the vertex data AND the indices of the objects are stored.
The part I don't understand is the (void*)(UPTR)vboIndexOffset(i). I looked at the code many times, and the function vboIndexOffset returns an int32, and UPTR also casts the returned value to an int32. So how can you cast an int32 to a void* and expect this to work? But let's assume I made a mistake there and that it actually returns a pointer to this variable instead. The 4th argument of the glDrawElements call is an offset in bytes within a memory block. Here is how the data is actually stored on the GPU:
int ofs = m_vertices.getSize();
for (int i = 0; i < numObj; i++)
{
obj[i].ofsInVBO = ofs;
obj[i].sizeInVBO = obj[i].indices->getSize() * 3;
ofs += obj[i].indices->getNumBytes();
}
vbo.resizeDiscard(ofs);
memcpy(vbo.getMutablePtr(), vertices.getPtr(), vertices.getSize());
for (int i = 0; i < numObj; i++)
{
memcpy(
m_vbo.getMutablePtr(obj[i].ofsInVBO),
obj[i].indices->getPtr(),
obj[i].indices->getNumBytes());
}
So all they do is calculate the number of bytes needed to store the vertex data, then add to this the number of bytes needed to store the indices of all the objects we want to draw. Then they allocate memory of that size and copy the data into it: first the vertex data and then the indices. Once this is done, they push it to the GPU using:
glGenBuffers(1, &glBuffer);
glBindBuffer(GL_ARRAY_BUFFER, glBuffer);
checkSize(size, sizeof(GLsizeiptr) * 8 - 1, "glBufferData");
glBufferData(GL_ARRAY_BUFFER, (GLsizeiptr)size, data, GL_STATIC_DRAW);
What's interesting is that they store everything in the GL_ARRAY_BUFFER. They never store the vertex data in a GL_ARRAY_BUFFER and the indices separately in a GL_ELEMENT_ARRAY_BUFFER.
But to go back to the code where the drawing is done, they first do the usual stuff to declare vertex attribute. For each attribute:
glBindBuffer(GL_ARRAY_BUFFER, glBuffer);
glEnableVertexAttribArray(loc);
glVertexAttribPointer(loc, size, type, GL_FALSE, stride, pointer);
This makes sense and is just standard. And then the code I already mentioned:
for (int i = 0; i < numObjs; i++) {
glDrawElements(GL_TRIANGLES, vboIndexSize(i), GL_UNSIGNED_INT, (void*)(UPTR)vboIndexOffset(i));
}
So the question: even if (UPTR) actually returns a pointer to the variable (the code doesn't indicate this, but I may be mistaken; it's a large project), I didn't know it was possible to store all the vertex and index data in the same memory block using GL_ARRAY_BUFFER and then call glDrawElements with the 4th argument being the byte offset to the first index of the current object within this block. I thought you needed to use a GL_ARRAY_BUFFER and a GL_ELEMENT_ARRAY_BUFFER to declare the vertex data and the indices separately. I didn't think you could declare all the data in one go using GL_ARRAY_BUFFER, and I can't get it to work on my side anyway.
Has anyone seen this working before? I haven't been able to get it working yet, and wonder if someone could tell me if there's something specific I need to be aware of to make it work. I tested with a simple triangle with position, normal and texture coordinate data, so I have 8 * 3 floats for the vertex data, and I have an array of 3 integers for the indices: 0, 1, 2. I then copy everything into a memory block, initialize glBufferData with this, then try to draw the triangle with:
int n = 96; // offset in bytes into the memory block, first int in the index list
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, (void*)(&n));
It doesn't crash but I can't see the triangle.
EDIT:
Adding the code that doesn't seem to work for me (crashes).
float vertices[] = {
0, 1, 0, // Vertex 1 (X, Y)
2, -1, 0, // Vertex 2 (X, Y)
-1, -1, 0, // Vertex 3 (X, Y)
3, 1, 0,
};
U8 *ptr = (U8*)malloc(4 * 3 * sizeof(float) + 6 * sizeof(unsigned int));
memcpy(ptr, vertices, 4 * 3 * sizeof(float));
unsigned int indices[6] = { 0, 1, 2, 0, 3, 1 };
memcpy(ptr + 4 * 3 * sizeof(float), indices, 6 * sizeof(unsigned int));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 4 * 3 * sizeof(float) + 6 * sizeof(unsigned int), ptr, GL_STATIC_DRAW);
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
free(ptr);
Then when it comes to draw:
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// see stackoverflow.com/questions/8283714/what-is-the-result-of-null-int/
typedef void (*TFPTR_DrawElements)(GLenum, GLsizei, GLenum, uintptr_t);
TFPTR_DrawElements myGlDrawElements = (TFPTR_DrawElements)glDrawElements;
myGlDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, uintptr_t(4 * 3 * sizeof(float)));
This crashes the app.
see answer below for solution
This is due to OpenGL re-using fixed-function pipeline calls. When you bind a GL_ARRAY_BUFFER VBO, a subsequent call to glVertexAttribPointer expects an offset into the VBO (in bytes), which is then cast to a (void *). The GL_ARRAY_BUFFER binding remains in effect until another buffer is bound, just as the GL_ELEMENT_ARRAY_BUFFER binding remains in effect until another 'index' buffer is bound.
You can encapsulate the buffer binding and attribute pointer (offset) states using a Vertex Array Object.
The address in your example isn't valid: you are passing the address of the variable n, not the offset itself. Cast the offset value instead: (void *) n
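For instance, for the test triangle described above (96 bytes of vertex data before the indices), the call could look like this; note that the index-buffer binding still has to be in place, as the solution further down shows:
// Pass the byte offset itself, cast to a pointer - not the address of a variable.
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, (const void *)(uintptr_t)96);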
Thanks for the answers. After doing some more research on the web, I think that:
first, you should be using glGenVertexArrays. It seems that this is THE standard now for OpenGL 4.x, so rather than calling glVertexAttribPointer before drawing the geometry, it seems like best practice is to create a VAO when the data is pushed to the GPU buffers.
I was (actually) able to combine the vertex data and the indices within the SAME buffer (a GL_ARRAY_BUFFER) and then draw the primitive using glDrawElements (see below). The standard way, anyway, is to push the vertex data to a GL_ARRAY_BUFFER and the indices to a GL_ELEMENT_ARRAY_BUFFER separately. So if that's the standard way of doing it, it's probably better not to try to be too smart and just use these functions.
Example:
glGenBuffers(1, &vbo);
// push the data using GL_ARRAY_BUFFER
glGenBuffers(1, &vio);
// push the indices using GL_ELEMENT_ARRAY_BUFFER
...
glGenVertexArrays(1, &vao);
// do calls to glVertexAttribPointer
...
Please correct me if I am wrong, but that seems the correct (and only) way to go.
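For reference, a fleshed-out version of that standard two-buffer path, using the same vertex/index data as in the original post, might look like this (a sketch, not the project's code):
// Sketch: the same vertex/index data as above, but in two separate buffers,
// both recorded into the VAO.
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);

glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 4 * 3 * sizeof(float), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);

glGenBuffers(1, &vio);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vio); // recorded in the VAO
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 6 * sizeof(unsigned int), indices, GL_STATIC_DRAW);

glBindVertexArray(0);

// Drawing: the offset is 0, because this buffer holds only indices.
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (const void *)0);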
EDIT:
However, it is actually possible to "pack" the vertex data and the indices together into an ARRAY_BUFFER as long as a call to glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo) is done prior to calling glDrawElements.
Working code (compared with code in original post):
float vertices[] = {
0, 1, 0, // Vertex 1 (X, Y)
2, -1, 0, // Vertex 2 (X, Y)
-1, -1, 0, // Vertex 3 (X, Y)
3, 1, 0,
};
U8 *ptr = (U8*)malloc(4 * 3 * sizeof(float) + 6 * sizeof(unsigned int));
memcpy(ptr, vertices, 4 * 3 * sizeof(float));
unsigned int indices[6] = { 0, 1, 2, 0, 3, 1 };
memcpy(ptr + 4 * 3 * sizeof(float), indices, 6 * sizeof(unsigned int));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, 4 * 3 * sizeof(float) + 6 * sizeof(unsigned int), ptr, GL_STATIC_DRAW);
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(0);
free(ptr);
Then when it comes to draw:
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo); // << THIS IS ACTUALLY NOT NECESSARY
// VVVV THIS WILL MAKE IT WORK VVVV
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vbo);
// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
// see stackoverflow.com/questions/8283714/what-is-the-result-of-null-int/
typedef void (*TFPTR_DrawElements)(GLenum, GLsizei, GLenum, uintptr_t);
TFPTR_DrawElements myGlDrawElements = (TFPTR_DrawElements)glDrawElements;
myGlDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, uintptr_t(4 * 3 * sizeof(float)));

What OpenGL state needs to be reset upon a new shared context?

I have the following class to represent a Mesh;
OpenGLMesh::OpenGLMesh(const std::vector<float>& vertexData, const std::vector<float>& normalData, const std::vector<float>& texCoords, const std::vector<uint32_t>& indexData) : mIndices(indexData.size())
{
glGenBuffers(1, &mVBO);
glGenBuffers(1, &mIndexBuffer);
glGenVertexArrays(1, &mVAO);
// buffer vertex, normals and index data
size_t vertexDataSize = vertexData.size() * sizeof(float);
size_t normalDataSize = normalData.size() * sizeof(float);
size_t texCoordDataSize = texCoords.size() * sizeof(float);
size_t indexDataSize = indexData.size() * sizeof(uint32_t);
glBindVertexArray(mVAO);
glBindBuffer(GL_ARRAY_BUFFER, mVBO);
glBufferData(GL_ARRAY_BUFFER, vertexDataSize + normalDataSize + texCoordDataSize, NULL, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, NULL, vertexDataSize, &vertexData[0]);
glBufferSubData(GL_ARRAY_BUFFER, vertexDataSize, normalDataSize, &normalData[0]);
glBufferSubData(GL_ARRAY_BUFFER, vertexDataSize + normalDataSize, texCoordDataSize, &texCoords[0]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, mIndexBuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexDataSize, &indexData[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(vertexDataSize));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(vertexDataSize + normalDataSize));
// unbind array buffer and VAO
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
And then a method to draw the mesh;
void OpenGLMesh::Render()
{
glBindVertexArray(mVAO);
glDrawElements(GL_TRIANGLES, mIndices, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
I am using GLFW3, where you can create a new window and use the same context as the previous window (link). However, as I understand it, you still need to reset the OpenGL states even though the buffer objects and their contents are still saved - correct?
I tried reading the manual, but I cannot figure out which parts of the code I posted are treated as part of the OpenGL state and need to be reset.
create a new window and use the same context
Not quite. GLFW will create a new context that shares some of the other context's objects. Namely, every object that holds some data (textures, buffer objects, shaders and programs, etc.) will be shared, i.e. accessible from both contexts. Container objects, which reference other objects, are not shared (framebuffer objects, vertex array objects).
however as I understand it you still need to reset the OpenGL states
Technically the newly created contexts start in a reset default state, with some objects already preallocated and initialized.
However, like with any state machine, you should never assume OpenGL to be in a certain state when you're about to use it. Always make sure you set all the required state right before you need it. Which boils down to this: set each and every state you require at the beginning of your drawing code.
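In practical terms for the OpenGLMesh above: the buffers (mVBO, mIndexBuffer) and their contents are visible from the new context, but the VAO is not, so something along these lines would be needed once per context that renders the mesh (a sketch; the helper name is made up):
// Sketch: rebuild the (non-shared) VAO in a newly created context,
// reusing the shared buffer objects that already hold the data.
GLuint BuildVAOForContext(GLuint vbo, GLuint indexBuffer,
                          size_t vertexDataSize, size_t normalDataSize)
{
    GLuint vao = 0;
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);

    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBuffer);

    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)vertexDataSize);
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)(vertexDataSize + normalDataSize));

    glBindVertexArray(0);
    return vao;
}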