I have written a basic program that loads a model and renders it to the screen. I'm using GLSL to transform the model appropriately, but the normals always end up incorrect after rotation, no matter which combination of model matrix, view matrix, inverse, transpose, etc. I use to rotate them. The model matrix is just a rotation around the y-axis using glm:
angle += deltaTime;
modelMat = glm::rotate(glm::mat4(), angle, glm::vec3(0.f, 1.f, 0.f));
My current vertex shader code (I've modified the normal line many many times):
#version 150 core
uniform mat4 projMat;
uniform mat4 viewMat;
uniform mat4 modelMat;
in vec3 inPosition;
in vec3 inNormal;
out vec3 passColor;
void main()
{
gl_Position = projMat * viewMat * modelMat * vec4(inPosition, 1.0);
vec3 normal = normalize(mat3(inverse(modelMat)) * inNormal);
passColor = normal;
}
And my fragment shader:
#version 150 core
in vec3 passColor;
out vec4 outColor;
void main()
{
outColor = vec4(passColor, 1.0);
}
I know for sure that the uniform variables are being passed to the shader correctly, as the model itself is transformed properly, and the initial normals are correct when I do calculations such as directional lighting.
I've created a GIF of the rotating model, sorry about the low quality:
http://i.imgur.com/LgLKHCb.gif?1
What confuses me the most is how the normals appear to rotate on multiple axes, which I don't think should happen when they are multiplied by a simple single-axis rotation matrix.
Edit:
I've added some more of the client code below.
This is where the buffers get bound for the model, in the Mesh class (vao is GLuint, defined in the class):
GLuint vbo[3];
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(normals? (uvcoords? 3 : 2) : (uvcoords? 2 : 1), vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, vcount * 3 * sizeof(GLfloat), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
if(normals)
{
glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, vcount * 3 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_TRUE, 0, 0);
glEnableVertexAttribArray(1);
}
if(uvcoords)
{
glBindBuffer(GL_ARRAY_BUFFER, vbo[2]);
glBufferData(GL_ARRAY_BUFFER, vcount * 2 * sizeof(GLfloat), uvcoords, GL_STATIC_DRAW);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(2);
}
glBindVertexArray(0);
glGenBuffers(1, &ib);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ib);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, icount * sizeof(GLushort), indices, GL_STATIC_DRAW);
This is where the shaders are compiled after being loaded into memory with a simple readf(), in the Material class:
u32 vertexShader = glCreateShader(GL_VERTEX_SHADER);
u32 fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(vertexShader, 1, (const GLchar**)&vsContent, 0);
glCompileShader(vertexShader);
if(!validateShader(vertexShader)) return false;
glShaderSource(fragmentShader, 1, (const GLchar**)&fsContent, 0);
glCompileShader(fragmentShader);
if(!validateShader(fragmentShader)) return false;
programHandle = glCreateProgram();
glAttachShader(programHandle, vertexShader);
glAttachShader(programHandle, fragmentShader);
glBindAttribLocation(programHandle, 0, "inPosition");
glBindAttribLocation(programHandle, 1, "inNormal");
//glBindAttribLocation(programHandle, 2, "inUVCoords");
glLinkProgram(programHandle);
if(!validateProgram()) return false;
And the validateShader(GLuint) and validateProgram() functions:
bool Material::validateShader(GLuint shaderHandle)
{
char buffer[2048];
memset(buffer, 0, 2048);
GLsizei len = 0;
glGetShaderInfoLog(shaderHandle, 2048, &len, buffer);
if(len > 0)
{
Logger::log("ve::Material::validateShader: Failed to compile shader - %s", buffer);
return false;
}
return true;
}
bool Material::validateProgram()
{
char buffer[2048];
memset(buffer, 0, 2048);
GLsizei len = 0;
glGetProgramInfoLog(programHandle, 2048, &len, buffer);
if(len > 0)
{
Logger::log("ve::Material::validateProgram: Failed to link program - %s", buffer);
return false;
}
glValidateProgram(programHandle);
GLint status;
glGetProgramiv(programHandle, GL_VALIDATE_STATUS, &status);
if(status == GL_FALSE)
{
Logger::log("ve::Material::validateProgram: Failed to validate program");
return false;
}
return true;
}
Each Material instance has a std::map of Meshes, which get rendered like so:
void Material::render()
{
if(loaded)
{
glUseProgram(programHandle);
for(auto it = mmd->uniforms.begin(); it != mmd->uniforms.end(); ++it)
{
GLint loc = glGetUniformLocation(programHandle, (const GLchar*)it->first);
switch(it->second.type)
{
case E_UT_FLOAT3: glUniform3fv(loc, 1, it->second.f32ptr); break;
case E_UT_MAT4: glUniformMatrix4fv(loc, 1, GL_FALSE, it->second.f32ptr); break;
default: break;
}
}
for(Mesh* m : mmd->objects)
{
GLint loc = glGetUniformLocation(programHandle, "modelMat");
glUniformMatrix4fv(loc, 1, GL_FALSE, &m->getTransform()->getTransformMatrix()[0][0]);
m->render();
}
}
}
it->second.f32ptr would be a float pointer to &some_vec3[0] or &some_mat4[0][0].
However, I upload the model's transformation matrix manually before rendering; it is only a rotation matrix, since the Transform class (returned by Mesh::getTransform()) only does a glm::rotate() while I was trying to narrow down the problem.
Lastly, the Mesh render code:
if(loaded)
{
glBindVertexArray(vao);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ib);
glDrawElements(GL_TRIANGLES, indexCount, GL_UNSIGNED_SHORT, 0);
}
I think this is all the necessary code, but I can post more if needed.
Your normal matrix calculation is just wrong. The correct normal matrix is the transpose of the inverse of the upper-left 3x3 submatrix of the model or modelview matrix (depending on which space you want to do your lighting calculations in).
What you do is invert the full 4x4 matrix and take the upper-left 3x3 submatrix of that, which is simply wrong.
You should calculate transpose(inverse(mat3(modelMat))), but you really shouldn't do this in the shader; calculate it together with the model matrix on the CPU instead, to avoid making the GPU perform a fairly expensive matrix inversion per vertex.
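As a rough illustration of that CPU-side approach (a sketch only: it assumes a new mat3 uniform, here called normalMat, which is not in the original shader), using glm and the question's programHandle:
// C++ side, once per frame after updating modelMat
// (needs <glm/glm.hpp> and <glm/gtc/type_ptr.hpp>)
glm::mat3 normalMat = glm::transpose(glm::inverse(glm::mat3(modelMat)));
GLint normalLoc = glGetUniformLocation(programHandle, "normalMat"); // hypothetical uniform name
glUniformMatrix3fv(normalLoc, 1, GL_FALSE, glm::value_ptr(normalMat));

// GLSL side, replacing the per-vertex inverse() in the vertex shader:
//   uniform mat3 normalMat;
//   vec3 normal = normalize(normalMat * inNormal);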
As long as your transformations consist of only rotations, translations, and uniform scaling, you can simply apply the rotation part of your transformations to the normals.
In general, it's the transposed inverse matrix that needs to be applied to the normals, using only the regular 3x3 linear transformation matrix, without the translation part that extends the matrix to 4x4.
For pure rotations, the inverse-transpose is identical to the original matrix, and for uniform scaling it differs only by a scale factor that disappears when the normal is re-normalized. So the matrix operations to invert and transpose matrices are only needed if you apply other types of transformations, like non-uniform scaling or shear transforms.
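To illustrate that last point concretely (a sketch, using the same hypothetical normalMat uniform as in the earlier example): for a model matrix that is only the y-axis rotation from the question, the CPU-side calculation can skip the inverse entirely:
// Valid while modelMat contains only rotations (and at most uniform scaling,
// since a uniform scale factor disappears when the normal is re-normalized).
glm::mat3 normalMat = glm::mat3(modelMat);
glUniformMatrix3fv(glGetUniformLocation(programHandle, "normalMat"), 1, GL_FALSE, glm::value_ptr(normalMat));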
Apparently, if the vertex normals of a mesh are incorrect, strange rotation artifacts like this will occur. In my case, I had rotated the mesh by 90 degrees on the X axis in my 3D modelling program (Blender), because Blender uses the z-axis as its vertical axis whereas my program uses the y-axis. However, the method my export script used to transform/rotate the mesh only transformed the vertex positions, not the normals. Without any prior transformation, the program works as expected. I initially found out that the normals were incorrect by comparing the normalized positions and normals of a symmetrical object (I used a cube with smoothed normals) and seeing that the normals were rotated. Thank you to #derhass and #Solkar for guiding me to the answer.
However, if anyone still wants to contribute, I would like to know why the normals don't rotate about a single axis when multiplied by a single-axis rotation matrix, even when they are incorrect.
I load the material and texture information from the OBJ file based on this loader (https://github.com/Bly7/OBJ-Loader). Now I want to load Sponza and render it. However, there are 10 or more textures, and even if all of them are passed to the shader, each texture still needs to be matched to the appropriate vertices. Looking at the result of loading the OBJ file, a texture is assigned to each part.
Example.
Mesh 0 : Pillar
position0(x, y, z), position1(x, y, z), uv0(x, y) ...
diffuse texture : tex0.png
Mesh 1 : Wall
position0(x, y, z), position1(x, y, z), uv0(x, y) ...
diffuse texture : tex1.png
.
.
.
The textures are kept in an array, and each one has a corresponding mesh index. My idea is that, when passing vertex data to the shader, it should work if I split the data by mesh and bind the matching texture at the same time. However, I'm not sure this idea is correct, and the several methods I've tried haven't worked.
This is the current simple code.
main.cpp :
do {
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture[i]); // texture[] : Array of textures loaded from obj file.
glUniform1i(glGetUniformLocation(shaderID, "myTex"), 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertex_position);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, texCoord);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, element);
glDrawElements(GL_TRIANGLES, element_indices.size(), GL_UNSIGNED_INT, (void*)0);
} while(glfwWindowShouldClose(window) == 0);
Fragment shader :
layout(location = 0) out vec4 out_color;
// from vertex shader
in vec2 texCoord;
uniform sampler2D myTex;
void main() {
out_color = texture(myTex, texCoord);
}
I want the "i" in the code above to correspond to the mesh index loaded from the file. Please let me know if my idea is wrong or if there is another way.
As your model has only one texture per mesh, I can suggest this simple code to use:
do {
glActiveTexture(GL_TEXTURE0);
glUniform1i(glGetUniformLocation(shaderID, "myTex"), 0);
for (unsigned int i = 0; i < mesh_count; i++) {
glBindTexture(GL_TEXTURE_2D, texture[i]); // bind this mesh's texture
// Bind vertex and index (or element) buffer and setup vertex attribute pointers
// Draw mesh
}
} while (window_open);
The code is mostly self-explanatory. It first activates texture unit 0, then for every mesh it binds that mesh's texture, vertex buffer, and index (element) buffer, and does whatever other preparation is needed to draw the mesh. Then it issues the draw call.
Note that this is a very basic example; most models would still look weird with this code alone. I would recommend the model-loading tutorial from LearnOpenGL, which explains this more broadly but in an accessible way.
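As a concrete sketch of that loop, assuming each mesh keeps its own vertex array and diffuse texture handle in a meshes container (names such as Mesh, vao, indexCount, diffuseTex and meshes are illustrative, not taken from the question's code):
struct Mesh {
    GLuint vao;          // VAO with position/uv attributes already set up
    GLuint diffuseTex;   // diffuse texture loaded for this mesh's material
    GLsizei indexCount;  // number of indices in its element buffer
};

// render loop body, with the shader program already bound
glActiveTexture(GL_TEXTURE0);
glUniform1i(glGetUniformLocation(shaderID, "myTex"), 0); // sampler always reads unit 0
for (const Mesh &m : meshes) {
    glBindTexture(GL_TEXTURE_2D, m.diffuseTex); // swap the texture per mesh
    glBindVertexArray(m.vao);                   // brings in that mesh's buffers
    glDrawElements(GL_TRIANGLES, m.indexCount, GL_UNSIGNED_INT, (void*)0);
}
glBindVertexArray(0);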
I'm porting an older program using glBegin()/glEnd() (top picture) to glDrawArraysInstanced() (bottom picture). I expected some performance improvements, but I got the opposite. Now this is the first time I've tried using glDrawArraysInstanced() so I think I must have screwed up somewhere.
The two are basically identical and the only difference is how they draw the circles.
What have I done wrong? And if not, what makes it slower?
// This runs once at startup
std::vector<glm::mat4> transforms;
glGenBuffers(NUM_BUFFERS, VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO[TRANSFORM]);
for (int i = 0; i < 4; ++i) {
glEnableVertexAttribArray(1 + i);
glVertexAttribPointer(1 + i, 4, GL_FLOAT, GL_FALSE, sizeof(glm::mat4),
(GLvoid *)(i * sizeof(glm::vec4)));
glVertexAttribDivisor(1 + i, 1);
} // ---------
// This runs every frame
if (num_circles > transforms.size()) transforms.resize(num_circles);
int i = 0;
for (const auto &circle : circle_vec) {
transforms[i++] = circle.transform.getModel();
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[TRANSFORM]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * num_circles, &transforms[0], GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(VAO);
glDrawArraysInstanced(GL_LINE_LOOP, 0, CIRCLE_NUM_VERTICES, num_circles);
glBindVertexArray(0);
// ---------
// And this is the vertex shader
#version 410
in vec3 position;
in mat4 transform;
void main()
{
gl_Position = transform * vec4(position, 1.0);
}
What I saw at first glance is that you are creating a new vector every frame. Consider caching it.
// This runs every frame
std::vector<glm::mat4> transforms;
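A minimal sketch of that caching idea: keep the vector (and the GPU buffer allocation) outside the per-frame path and only refill its contents each frame. Using glBufferSubData instead of re-creating the buffer store with glBufferData every frame is an extra suggestion on top of the answer; MAX_CIRCLES is an assumed upper bound, not from the original code.
// Once at startup: persistent CPU-side storage and a fixed-size GPU buffer
std::vector<glm::mat4> transforms(MAX_CIRCLES);
glBindBuffer(GL_ARRAY_BUFFER, VBO[TRANSFORM]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::mat4) * MAX_CIRCLES, nullptr, GL_DYNAMIC_DRAW);

// Every frame: refill the cached vector and overwrite only the bytes in use
int i = 0;
for (const auto &circle : circle_vec) {
    transforms[i++] = circle.transform.getModel();
}
glBindBuffer(GL_ARRAY_BUFFER, VBO[TRANSFORM]);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(glm::mat4) * num_circles, transforms.data());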
I'm very new to OpenGL and I am doing a mini project where I experiment with the depth buffer. I got to the stage of displaying it on the screen. However, I want to draw it using screen coordinates instead of converting to floats. I read somewhere that I need to use a projection matrix. I have looked for ages and tested loads of different options, but I can't seem to get it right.
Can anyone point me to a useful resource or explain how I would go about doing this?
EDIT
At the moment my matrix looks like this:
projectionMat = glm::ortho(0.0f, (float)_cols, 0.0f, (float)_rows, 0.0f, (float)_maxDepthVal);
projection = glGetUniformLocation(_program, "Projection");
glUniformMatrix4fv(projection, 1, GL_FALSE, glm::value_ptr(projectionMat));
EDIT 2
With some fiddling I found that cols had to be negative, for some strange reason, before anything would display. It will now display correctly on the screen, but for some reason it has a gap around the sides opposite the origin. Why is this? Even a small move in the camera position and target causes all of it to vanish, so I don't think that is the problem.
Pixel Art Representation!!
OOOO!!
OOOO!!
OOOO!!
!!!!!!!!!!!!!!
New code
glm::mat4 Projection = glm::ortho(0.0f, -static_cast<float>(_cols), 0.0f, static_cast<float>(_rows), 0.0f, static_cast<float>(_maxDepthVal));
projection = glGetUniformLocation(_program, "Projection");
glm::mat4 View = glm::lookAt(
glm::vec3(0.0f, 0.0f, -0.1f),
glm::vec3(0.0f , 0.0f, 0.0f), // and looks at the origin
glm::vec3(0,1,0) // Head is up (set to 0,-1,0 to look upside-down)
);
// Model matrix : an identity matrix (model will be at the origin)
glm::mat4 Model = glm::mat4(1.0f);
projectionMat = Projection * View * Model;
glUniformMatrix4fv(projection, 1, GL_FALSE, glm::value_ptr(projectionMat));
EDIT 3
I can translate it using the Model matrix, but it has a gap of 5 pixels around it that I can't get rid of. Any help on that would be appreciated, but thanks for taking an interest.
UPDATE
As per request my draw code
glUseProgram(_program);
glDepthMask(GL_TRUE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_ALWAYS);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
SDL_GL_SwapWindow(_window);
glPointSize(1);
glEnableVertexAttribArray(0);
//Insert matrix here
glVertexAttribPointer(0, 3, GL_UNSIGNED_INT, GL_FALSE, 0, 0);
glDrawArrays(GL_POINTS, 0, _dataCount);
glDisableVertexAttribArray(0);
my vbo:
glGenBuffers(1, &_vbo);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glBufferData(GL_ARRAY_BUFFER, _dataCount * 4 * sizeof(unsigned int), NULL, GL_STATIC_DRAW);
if(_vbo == 0 || glGetError() != GL_NO_ERROR)
{
_errorMessage = "VBO COULD NOT BE CREATED";
error();
}
checkCudaErrors(cudaGraphicsGLRegisterBuffer(&vbo, _vbo, cudaGraphicsMapFlagsNone));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glUseProgram(0);
I'm also having issues with the write: when it converts to floats (for drawing) it loses precision, so if I read the value out again it rounds to the nearest factor (0, 256, 512, etc.). Is there another way to do it that stores the value as an unsigned int? (I realize this is getting slightly off topic, but any help would be appreciated.)
The issue appeared to be with the cols variable: it needed to be negated to work, otherwise everything was off the screen.
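For anyone landing here with the same goal, below is a sketch of a pixel-coordinate orthographic projection, reusing the question's variable names. The x/y mapping is the essential part; how the z range and the view matrix interact depends on how the depth values are stored, which is probably also where the sign flip on cols came from, so treat the z arguments as an assumption.
// x in [0, _cols] and y in [0, _rows] map directly to the viewport,
// origin in the bottom-left corner (swap the bottom/top arguments for a
// top-left origin). The z range is symmetric so that positive depth values
// are not clipped when the view matrix is left as the identity.
glm::mat4 projectionMat = glm::ortho(0.0f, static_cast<float>(_cols),
                                     0.0f, static_cast<float>(_rows),
                                     -static_cast<float>(_maxDepthVal),
                                     static_cast<float>(_maxDepthVal));
glUniformMatrix4fv(projection, 1, GL_FALSE, glm::value_ptr(projectionMat));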
I have a cube that I am loading from an OBJ file. When I make its position (0, 0, 0), everything works fine: the cube renders, and my function that gives it a velocity moves the cube across the screen. However, if I change the position of the cube to something other than (0, 0, 0) before entering my while loop where I render and calculate velocity changes, the cube never renders. This is the first time I have tried to reload my vertices every time I render a frame, and I am assuming I messed something up there, but I've looked over other code and can't figure out what.
Here is my main function:
int main()
{
#ifdef TESTING
testing();
exit(0);
#endif
setupAndInitializeWindow(768, 480, "Final Project");
TriangleTriangleCollision collisionDetector;
Asset cube1("cube.obj", "vertexShader.txt", "fragmentShader.txt");
cube1.position = glm::vec3(0.0, 2.0, 0.0);
cube1.velocity = glm::vec3(0.0, -0.004, 0.0);
MVP = projection * view * model;
do{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
moveAsset(cube1);
renderAsset(cube1);
glfwSwapBuffers(window);
glfwPollEvents();
} while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
glfwTerminate();
return 0;
}
my moveAsset function:
void moveAsset(Asset &asset)
{
double currentTime = glfwGetTime();
asset.position.x += (asset.velocity.x * (currentTime - asset.lastTime));
asset.position.y += (asset.velocity.y * (currentTime - asset.lastTime));
asset.position.z += (asset.velocity.z * (currentTime - asset.lastTime));
for (glm::vec3 &vertex : asset.vertices)
{
glm::vec4 transformedVector = glm::translate(glm::mat4(1.0f), asset.position) * glm::vec4(vertex.x, vertex.y, vertex.z, 1);
vertex = glm::vec3(transformedVector.x, transformedVector.y, transformedVector.z);
}
asset.lastTime = glfwGetTime();
}
void renderAsset(Asset asset)
{
glUseProgram(asset.programID);
GLuint MatrixID = glGetUniformLocation(asset.programID, "MVP");
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, asset.vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, asset.vertices.size() * sizeof(glm::vec3), &asset.vertices[0], GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawArrays(GL_TRIANGLES, 0, asset.vertices.size());
glDisableVertexAttribArray(0);
}
my model, view and projection matrices are defined as:
glm::mat4 model = glm::mat4(1.0f);
glm::mat4 view = glm::lookAt(glm::vec3(5, 5, 10),
glm::vec3(0, 0, 0),
glm::vec3(0, 1, 0));
glm::mat4 projection = glm::perspective(45.0f, (float) _windowWidth / _windowHeight, 0.1f, 100.0f);
and finally, my Asset struct:
struct Asset
{
Asset() { }
Asset(std::string assetOBJFile, std::string vertexShader, std::string fragmentShader)
{
glGenVertexArrays(1, &vertexArrayID);
glBindVertexArray(vertexArrayID);
programID = LoadShaders(vertexShader.c_str(), fragmentShader.c_str());
// Read our .obj file
std::vector<glm::vec2> uvs;
std::vector<glm::vec3> normals;
loadOBJ(assetOBJFile.c_str(), vertices, uvs, normals);
// Load it into a VBO
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(glm::vec3), &vertices[0], GL_STATIC_DRAW);
//velocity = glm::vec3(0.0, 1.0, 1.0);
velocity = glm::vec3(0.0, 0.0, 0.0);
position = glm::vec3(0.0, 0.0, 0.0);
lastTime = glfwGetTime();
}
GLuint vertexArrayID;
GLuint programID;
GLuint vertexbuffer;
std::vector<glm::vec3> faces;
std::vector<glm::vec3> vertices;
glm::vec3 velocity;
double lastTime;
glm::vec3 position;
};
It looks like you're adding the current asset.position to your vertex positions on every iteration, replacing the previous positions. From the moveAsset() function:
for (glm::vec3 &vertex : asset.vertices)
{
glm::vec4 transformedVector = glm::translate(glm::mat4(1.0f), asset.position) *
glm::vec4(vertex.x, vertex.y, vertex.z, 1);
vertex = glm::vec3(transformedVector.x, transformedVector.y, transformedVector.z);
}
Neglecting the velocity for a moment, and assuming that you have an original vertex at (0, 0, 0), you would move it to asset.position on the first iteration. Then add asset.position again on the second iteration, which places it at 2 * asset.position. Then on the third iteration, add asset.position to this current position again, resulting in 3 * asset.position. So after n steps, the vertices will be around n * asset.position. Even if your object might be visible initially, it would move out of the visible range before you can blink.
To get your original strategy working, the most straightforward approach is to have two lists of vertices. One list contains your original object coordinates, which you never change. Then before you draw, you build a second list of vertices, calculated as the sum of the original vertices plus the current asset.position, and use that second list for rendering.
The whole thing is... not very OpenGL. There's really no need to modify the vertex coordinates on the CPU. You can make the translation part of the transformation applied in your vertex shader. You already have a model matrix in place. You can simply put the translation by asset.position into the model matrix, and recalculate the MVP matrix. You already have the glUniformMatrix4fv() call to pass the new matrix to the shader program in your renderAsset() function.
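A minimal sketch of that second approach, reusing the names from the question (the global model/view/projection matrices are assumed to be reachable from these functions): moveAsset() only updates asset.position, and renderAsset() rebuilds the MVP from it, while the OBJ vertex data stays exactly as it was uploaded in the constructor.
void moveAsset(Asset &asset)
{
    double currentTime = glfwGetTime();
    float dt = static_cast<float>(currentTime - asset.lastTime);
    asset.position += asset.velocity * dt;   // update only the logical position
    asset.lastTime = currentTime;
}

void renderAsset(const Asset &asset)
{
    // The translation lives in the model matrix; the vertex buffer filled in
    // the constructor is never touched again.
    glm::mat4 model = glm::translate(glm::mat4(1.0f), asset.position);
    glm::mat4 mvp = projection * view * model;

    glUseProgram(asset.programID);
    glUniformMatrix4fv(glGetUniformLocation(asset.programID, "MVP"), 1, GL_FALSE, &mvp[0][0]);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, asset.vertexbuffer);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
    glDrawArrays(GL_TRIANGLES, 0, static_cast<GLsizei>(asset.vertices.size()));
    glDisableVertexAttribArray(0);
}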
When I use immediate mode, it draws correctly, but when I pass the same vertices to the GPU, or even just point to them with a vertex attribute pointer, it doesn't work.
There is a position buffer holding the vertices:
std::vector<glm::vec3> posbuf;
and indices for it.
Immediate mode:
for (unsigned int i =0; i < indices.size(); i++) {
glBegin(GL_TRIANGLES);
glVertex3f(posbuf[indices[i].index[0]].x, posbuf[indices[i].index[0]].y, posbuf[indices[i].index[0]].z);
glVertex3f(posbuf[indices[i].index[1]].x, posbuf[indices[i].index[1]].y, posbuf[indices[i].index[1]].z);
glVertex3f(posbuf[indices[i].index[2]].x, posbuf[indices[i].index[2]].y, posbuf[indices[i].index[2]].z);
glEnd();
}
And this is the vertex attribute code:
glEnableVertexAttribArray(GL_ATTRIB_POS);
glVertexAttribPointer(GL_ATTRIB_POS, 3, GL_FLOAT, GL_FALSE, 0,&posbuf[0]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indVBO);
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(GL_ATTRIB_POS);
And a shader for it:
#version 120
attribute vec3 position;
void main()
{
vec4 finalv=vec4(position, 1.0);
gl_Position = gl_ModelViewProjectionMatrix * vec4(finalv.xyz,1.0);
}
[frag]
#version 120
void main()
{
gl_FragColor = vec4(1.0,0.0,0.0,1.0);
}
Immediate mode result: (screenshot)
Shader result: (screenshot)
I don't know what's wrong; I also tried passing posbuf using glm::value_ptr, and it gives the same result. I am on Fedora 18, which supports GLSL up to #version 140 and OpenGL 3.3.
EDIT :
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indVBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Triangle) * indices.size(), &indices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
struct Triangle {
int index[3];
};
It seems your indices vector contains complete triangles consisting of 3 indices each, but then in glDrawElements you use indices.size() as the number of elements to draw. glDrawElements doesn't take the number of primitives, though, but the number of indices, so you're missing 2/3 of your triangles. It should rather be
glDrawElements(GL_TRIANGLES, indices.size() * 3, GL_UNSIGNED_INT, 0);
(And indices should be renamed to triangles to avoid further confusion. By the way, index should probably be unsigned int[3] as promised to OpenGL. On any reasonable system there won't be a representational difference between ints and unsigned ints, but it is a bit inconsistent to use int while telling OpenGL it's an unsigned int.)
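A compact sketch of how the corrected pieces fit together, using the renaming suggested above (triangles instead of indices; whether the rest of the setup matches is an assumption):
struct Triangle {
    unsigned int index[3];        // matches the GL_UNSIGNED_INT passed to glDrawElements
};
std::vector<Triangle> triangles;  // one entry per triangle, 3 indices each

// at setup time
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indVBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(Triangle) * triangles.size(), triangles.data(), GL_STATIC_DRAW);

// at draw time: the count is the number of indices, not the number of triangles
glDrawElements(GL_TRIANGLES, static_cast<GLsizei>(triangles.size() * 3), GL_UNSIGNED_INT, 0);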