OpenGL - Draw object with image texture from .obj/.mtl - c++

I'm attempting to create a cube with an image texture using OpenGL with Google Cardboard for Android. My code is largely based on Google's demo code here. I can render the black, texture-less cube just fine, but the texture is what has me stuck.
FILES
Here is the .obj file:
mtllib 9c9ab3c3-ea26-4524-88f5-a524b6bb6057.mtl
g Mesh1 Model
usemtl Tile_Hexagon_White
v 2.16011 0 -2.27458
vt -7.08697 8.39536
vn 0 -1 -0
v 1.16011 0 -1.27458
vt -3.80613 4.70441
v 1.16011 0 -2.27458
vt -3.80613 8.39536
f 1/1/1 2/2/1 3/3/1
v 2.16011 0 -1.27458
vt -7.08697 4.70441
f 2/2/1 1/1/1 4/4/1
vt 7.46254 0
vn 1 0 -0
v 2.16011 1 -1.27458
vt 4.1817 3.69094
vt 4.1817 0
f 1/5/2 5/6/2 4/7/2
v 2.16011 1 -2.27458
vt 7.46254 3.69094
f 5/6/2 1/5/2 6/8/2
vt -7.08697 0
vn 0 0 -1
v 1.16011 1 -2.27458
vt -3.80613 3.69094
vt -7.08697 3.69094
f 1/9/3 7/10/3 6/11/3
vt -3.80613 0
f 7/10/3 1/9/3 3/12/3
vt -4.1817 0
vn -1 0 -0
vt -7.46254 3.69094
vt -7.46254 0
f 2/13/4 7/14/4 3/15/4
v 1.16011 1 -1.27458
vt -4.1817 3.69094
f 7/14/4 2/13/4 8/16/4
vt 3.80613 0
vn 0 0 1
vt 7.08697 3.69094
vt 3.80613 3.69094
f 2/17/5 5/18/5 8/19/5
vt 7.08697 0
f 5/18/5 2/17/5 4/20/5
vt 7.08697 4.70441
vn 0 1 -0
vt 3.80613 8.39536
vt 3.80613 4.70441
f 5/21/6 7/22/6 8/23/6
vt 7.08697 8.39536
f 7/22/6 5/21/6 6/24/6
And here is the .mtl file:
newmtl Tile_Hexagon_White
Ka 0.000000 0.000000 0.000000
Kd 0.835294 0.807843 0.800000
Ks 0.330000 0.330000 0.330000
map_Kd 9c9ab3c3-ea26-4524-88f5-a524b6bb6057/Tile_Hexagon_White.jpg
newmtl ForegroundColor
Ka 0.000000 0.000000 0.000000
Kd 0.000000 0.000000 0.000000
Ks 0.330000 0.330000 0.330000
The texture is just a repeating hexagonal pattern that I want to cover the entire cube:
CODE
First I load the OBJ file and separate the data into arrays.
I end up with Vertices_[108], Uv_[72], and Indices_[36]. I am confident that the program is functioning properly up to this point, but I can provide the values if necessary.
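For reference, here is a minimal sketch of that flattening step, using the same array names as above; the positions/uvs/faceTriplets inputs are assumed to come from a prior parsing pass (the vn index is ignored since the arrays above carry no normals):
#include <cstdint>
#include <vector>
struct Triplet { int v, vt, vn; };  // 1-based indices from an "f v/vt/vn" corner
void Flatten(const std::vector<float>& positions,      // 3 floats per "v" line
             const std::vector<float>& uvs,            // 2 floats per "vt" line
             const std::vector<Triplet>& faceTriplets, // 3 corners per "f" line
             std::vector<float>& Vertices_,
             std::vector<float>& Uv_,
             std::vector<uint16_t>& Indices_) {
    for (const Triplet& t : faceTriplets) {
        const int v = t.v - 1, vt = t.vt - 1;          // OBJ indices are 1-based
        Vertices_.insert(Vertices_.end(),
                         {positions[3*v], positions[3*v + 1], positions[3*v + 2]});
        Uv_.insert(Uv_.end(), {uvs[2*vt], uvs[2*vt + 1]});
        Indices_.push_back(static_cast<uint16_t>(Indices_.size()));
    }
}
For the cube above (12 triangles) this yields exactly 108 position floats, 72 UV floats, and 36 indices.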
After that, I load the image:
GLuint texture_id;
glGenTextures(1, &texture_id);
imageNum = texture_id;
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, imageNum);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
/*I don't really know how this block of code works, but it's supposed to load an image from the asset manager.
* env - the JNIEnv* environment
* java_asset_mgr - AAssetManager for fetching files
* path - the path and name of my texture
*/
jclass bitmap_factory_class =
env->FindClass("android/graphics/BitmapFactory");
jclass asset_manager_class =
env->FindClass("android/content/res/AssetManager");
jclass gl_utils_class = env->FindClass("android/opengl/GLUtils");
jmethodID decode_stream_method = env->GetStaticMethodID(
bitmap_factory_class, "decodeStream",
"(Ljava/io/InputStream;)Landroid/graphics/Bitmap;");
jmethodID open_method = env->GetMethodID(
asset_manager_class, "open", "(Ljava/lang/String;)Ljava/io/InputStream;");
jmethodID tex_image_2d_method = env->GetStaticMethodID(
gl_utils_class, "texImage2D", "(IILandroid/graphics/Bitmap;I)V");
jstring j_path = env->NewStringUTF(path.c_str());
RunAtEndOfScope cleanup_j_path([&] {
if (j_path) {
env->DeleteLocalRef(j_path);
}
});
jobject image_stream =
env->CallObjectMethod(java_asset_mgr, open_method, j_path);
jobject image_obj = env->CallStaticObjectMethod(
bitmap_factory_class, decode_stream_method, image_stream);
env->CallStaticVoidMethod(gl_utils_class, tex_image_2d_method, GL_TEXTURE_2D, 0,
image_obj, 0);
glGenerateMipmap(GL_TEXTURE_2D);
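For reference, the GLUtils.texImage2D call above is a convenience wrapper around glTexImage2D; a rough non-JNI equivalent, assuming width/height and RGBA8 pixels were already decoded by some other means (e.g. stb_image), would be:
// Sketch only: upload already-decoded RGBA8 pixels instead of a Java Bitmap.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, pixels);
glGenerateMipmap(GL_TEXTURE_2D);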
Draw:
// obj_program_ has a vertex shader and a fragment shader attached.
glUseProgram(obj_program_);
std::array<float, 16> target_array = modelview_projection_target_.ToGlArray();
glUniformMatrix4fv(obj_modelview_projection_param_, 1, GL_FALSE,
target_array.data());
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, imageNum);
//DRAW
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 0, Vertices_.data());
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, false, 0, Uv_.data());
glDrawElements(GL_TRIANGLES, Indices_.size(), GL_UNSIGNED_SHORT,
Indices_.data());
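(One aside: the draw code never sets the sampler uniform. That works here because an unset sampler uniform defaults to texture unit 0, but it can be made explicit; "u_Texture" is a hypothetical name, use whatever the fragment shader actually declares.)
// Optional: point the sampler at texture unit 0 explicitly.
GLint sampler = glGetUniformLocation(obj_program_, "u_Texture");
if (sampler >= 0) {
    glUniform1i(sampler, 0);
}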
RESULT
I end up with a cube that is oddly textured and I am unsure what is causing it.
I am sure there will be questions so I will do my best to answer and provide any information that will help!

The issue was that my texture needed to repeat. The vt coordinates in the .obj file run well outside [0, 1] (from -7.46254 up to 8.39536), so with GL_CLAMP_TO_EDGE every sample outside that range just returns the edge texels instead of tiling the pattern. These two lines were changed from
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
to
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

Related

How to properly align NPOT textures with odd row pixel width?

I am using LUT files stored in .cube files for image post-processing.
The LUT .cube file layout may look like this:
TITLE
LUT_3D_SIZE 2
1.000000 1.000000 1.000000 -> maps a black input pixel to a white output pixel
1.000000 0.000000 0.000000
0.000000 1.000000 0.000000
0.000000 0.000000 1.000000
1.000000 1.000000 0.000000
1.000000 0.000000 1.000000
0.000000 1.000000 1.000000
0.000000 0.000000 0.000000 -> maps a white input pixel to a black output pixel
I load the raw data from the file into a data vector (this part is 100% correct), upload it to a GL_TEXTURE_3D, and use it as a sampler3D in the fragment shader, binding it with glActiveTexture(GL_TEXTURE0_ARB + unit); the GLSL texture(...) function then gives interpolation for free.
glGenTextures(1, &texID);
glBindTexture(GL_TEXTURE_3D, texID);
constexpr int MIPMAP_LEVEL = 0; // I do not need any mipmapping for now.
//glPixelStorei(GL_UNPACK_ALIGNMENT, 1); -> should I use this???
glTexImage3D(
GL_TEXTURE_3D,
MIPMAP_LEVEL,
GL_RGB,
size.x, // Size of the LUT 2, 33 etc., x == y == z.
size.y,
size.z,
0,
GL_RGB,
GL_FLOAT, // I assume this is correct given how the data is stored in the LUT .cube file.
data
);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAX_LEVEL, MIPMAP_LEVEL);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_3D, 0);
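The usage side at draw time then looks roughly like this (lutLocation and unit are placeholder names, not from the code above):
glActiveTexture(GL_TEXTURE0 + unit);   // same as GL_TEXTURE0_ARB + unit
glBindTexture(GL_TEXTURE_3D, texID);
glUniform1i(lutLocation, unit);        // the sampler3D uniform in the fragment shader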
For simplicity, let's say the input image is restricted to RGB8 or RGB16 format.
This works OK when LUT_3D_SIZE is 2 (I suppose any POT will work), producing the expected constant color-transformation output.
However, these LUT files can have an NPOT LUT_3D_SIZE - typically odd numbers like 33 - and that is where I start to get problems and non-deterministic output (the output texture varies on every post-process run; I assume the non-aligned texture is being filled with some random data).
How should I address this problem?
I guess I could use glPixelStorei(GL_PACK/UNPACK_ALIGNMENT, x); to compensate for the odd pixel row width (33), but I would like to understand the math behind it instead of picking some random alignment that magically works for me. Also, I am not sure this is the real problem I am facing, so...
For clarification: I am on desktop GL with 3.3+ available, Nvidia card.
So, long story short: the problem was that something somewhere in the rest of the code was setting GL_UNPACK_ALIGNMENT to 8 (found using glGetIntegerv(GL_UNPACK_ALIGNMENT, &align)), which does not divide the actual row byte size into a whole number, so the texture was malformed.
The actual row byte size is 33 * 3 * sizeof(GL_FLOAT) (width * 3 components (RGB) * 4 bytes) = 396, which is divisible by 1, 2, and also 4, so all of those settings must be valid if used by the image loader. In the end it turned out the upload does work for all of these settings, and I must have missed something (or forgotten to recompile, or whatever) when I thought I had problems with GL_UNPACK_ALIGNMENT set to 1, as it is a valid option and now works properly - the same with values 2 and 4.
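A defensive pattern that follows from this, sketched with the names from the upload code above: query and pin the unpack alignment around the upload so pixel-store state set elsewhere cannot leak in, then restore it.
GLint prevAlign = 0;
glGetIntegerv(GL_UNPACK_ALIGNMENT, &prevAlign);  // whatever the rest of the code set
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);           // 1 divides any row byte size
glTexImage3D(GL_TEXTURE_3D, MIPMAP_LEVEL, GL_RGB,
             size.x, size.y, size.z, 0, GL_RGB, GL_FLOAT, data);
glPixelStorei(GL_UNPACK_ALIGNMENT, prevAlign);   // restore global state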

Using indices not working properly in OpenGL ES C++

I am using glDrawElements() for rendering a plane, but the indexing is not working as intended, so I am getting the wrong object. I have used AAssetManager for loading the coordinates from a Wavefront object file.
Here is my code for rendering:
GLushort squareindices[] = {
0 , 1 ,2,
0, 3, 2
};
GLfloat vertexColor[] = {
1.0f, 1.0f , 0.0f,
0.0f , 0.0f , 1.0f,
1.0f, 1.0f , 0.0f,
0.0f , 0.0f , 1.0f,
};
void flatPlain::render(GLuint program) {
if (renderSelf) {
// flatPlainProgram = program;
auto *gens = new programGenerator;
auto *mats = new matrixOperation;
flatPlainProgram = gens->createProgram(vertexplain, fragmentplain);
// LOGE2("%x" , flatPlainProgram);
vertexLocation = glGetAttribLocation(flatPlainProgram, "vertexPosition");
// textureLocation = glGetAttribLocation(flatPlainProgram, "texturecord");
vertexColorlocation = glGetAttribLocation(flatPlainProgram, "vertexColour");
projection = glGetUniformLocation(flatPlainProgram, "projection");
model = glGetUniformLocation(flatPlainProgram, "modelView");
// sampleLocation = glGetUniformLocation(flatPlainProgram, "texture");
mats->perspective(projectionMatrix, (float) fov, (float) w / (float) h, 0.1f, 100.0f);
// if ( image != nullptr)
// loadTexture();
mats->createVolume(modelMatrix);
mats->rotateX(modelMatrix, angle);
mats->translateMatrix(x, y, z, modelMatrix);
glUseProgram(flatPlainProgram);
glVertexAttribPointer(vertexLocation, 3, GL_FLOAT, GL_FALSE, 0, vertexCord);
glEnableVertexAttribArray(vertexLocation);
glVertexAttribPointer(vertexColorlocation, 3, GL_FLOAT, GL_FALSE, 0, vertexColor);
glEnableVertexAttribArray(vertexColorlocation);
glUniformMatrix4fv(projection, 1, GL_FALSE, projectionMatrix);
glUniformMatrix4fv(model, 1, GL_FALSE, modelMatrix);
// glUniform1i(sampleLocation, 0);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, squareindices);
glDeleteProgram(flatPlainProgram);
}
}
I have read the vertices and then rendered them.
My .obj file:
v 0.000000 0.000000 0.000000
v 10.000000 0.000000 0.000000
v 0.000000 0.000000 -10.000000
v 10.000000 0.000000 -10.000000
vt 0.000000 0.000000
vt 1.000000 0.000000
vt 1.000000 1.000000
vt 0.000000 1.000000
vn 0.0000 1.0000 0.0000
usemtl None
s off
f 1/1/1 2/2/1 4/3/1 3/4/1
My output:
These are your vertex coordinates:
0: ( 0.0, 0.0, 0.0)
1: (10.0, 0.0, 0.0)
2: ( 0.0, 0.0, -10.0)
3: (10.0, 0.0, -10.0)
0 1
+-----+
| |
| |
+-----+
2 3
Triangulate the quad with the following indices:
0, 1, 2, 1, 3, 2
0 1
+-----+ +
| / / |
| / / |
+ +-----+
2 3
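Applied to the posted render code, the corrected array is simply:
GLushort squareindices[] = {
    0, 1, 2,
    1, 3, 2
};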

OpenGL Skybox visible borders

I have my skybox showing:
But there are borders of the box, which I don't want. I have already searched the internet, and everything said that GL_CLAMP_TO_EDGE should work, but I am still seeing the borders.
This is what I used for the texture loading:
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
Can anyone tell me what I am doing wrong?
EDIT:
The strange thing is that the borders only show at the top of the skybox, i.e. where a side face touches the roof of the box.
Here is an image of it:
I finally found the solution. It was a filthy mistake in the texture itself: there is a black border around the texture, but you can barely see it unless you zoom in. I removed the borders and it worked.
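A related note for anyone hitting this on desktop OpenGL: if the seam comes from filtering rather than from the image itself, seamless cube map filtering (core since OpenGL 3.2, or via ARB_seamless_cube_map) lets linear filtering blend across face edges:
// Let cube map sampling interpolate across adjacent faces instead of clamping at each edge.
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);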
It's a texture-coordinate floating-point error. If you use shaders, you can clean the coordinates up to a strict [0.0f, 1.0f] range. I cannot say whether there is any possible solution through OpenGL API calls alone, but shaders can handle it. Here is an example using HLSL 2.0 (NVIDIA Cg) for a post-screen shader.
float g_fInverseViewportWidth: InverseViewportWidth;
float g_fInverseViewportHeight: InverseViewportHeight;
struct VS_OUTPUT {
float4 Pos: POSITION;
float2 texCoord: TEXCOORD0;
};
VS_OUTPUT vs_main(float4 Pos: POSITION){
VS_OUTPUT Out;
// Clean up inaccuracies
Pos.xy = sign(Pos.xy);
Out.Pos = float4(Pos.xy, 0, 1);
// Image-space
Out.texCoord.x = 0.5 * (1 + Pos.x + g_fInverseViewportWidth);
Out.texCoord.y = 0.5 * (1 - Pos.y + g_fInverseViewportHeight);
return Out;
}
The sign routine is used to pin the texture coordinates to a strict [0, 1] specification (GLSL has a sign routine you can use as well). sign returns the sign of a vector or scalar: -1 for negative and 1 for positive values. So the texture coordinate passed to the vertex shader must be specified as -1 where you want 0 and 1 where you want 1; the two texCoord formulas above then compute the actual texture coordinates.
Here you can see the one-texel-wide inaccuracy in the texture:
And now with the modified shader:

VBO - drawing normals with obj file

In general, my question is: how can I provide two sets of indices for the VBO, one for the vertices and one for the normals?
I have the following .obj file:
mtllib cube.mtl
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -0.999999
v 0.999999 1.000000 1.000001
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
vn 0.000000 -1.000000 0.000000
vn 0.000000 1.000000 0.000000
vn 1.000000 0.000000 0.000000
vn -0.000000 -0.000000 1.000000
vn -1.000000 -0.000000 -0.000000
vn 0.000000 0.000000 -1.000000
usemtl Material
f 1//1 2//1 3//1 4//1
f 5//2 8//2 7//2 6//2
f 1//3 5//3 6//3 2//3
f 2//4 6//4 7//4 3//4
f 3//5 7//5 8//5 4//5
f 5//6 1//6 4//6 8//6
As you can see, there are 8 vertices and 6 normals. On the face lines, the file references the vertices by one set of indices and the normals by a different set of indices.
I am trying to draw the cube model with a VBO. I have written the following code:
float vertex[] = {1, -1, -1,
1, -1, 1,
-1, -1, 1,
-1, -1, -1,
1, 1, -1,
1, 1, 1,
-1, 1, 1,
-1, 1, -1};
float normals[] = {0, -1, 0,
0, 1, 0,
1, 0, 0,
0, 0, 1,
-1, 0, 0,
0, 0, -1};
int index[] = {0, 1, 2, 3,
4, 7, 6, 5,
0, 4, 5, 1,
1, 5, 6, 2,
2, 6, 7, 3,
4, 0, 3, 7};
GLuint buffer, ind;
int offset = 0;
void vboInit()
{
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertex) + sizeof(normals), 0, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, offset, sizeof(vertex), vertex); offset+= sizeof(vertex);
glBufferSubData(GL_ARRAY_BUFFER, offset, sizeof(normals), normals); offset+= sizeof(normals);
glGenBuffers(1, &ind);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ind);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(index), index, GL_STATIC_DRAW);
}
void vboDraw()
{
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ind);
glNormalPointer(GL_FLOAT, 0, (GLvoid*)(sizeof(vertex)));
glVertexPointer(3, GL_FLOAT, 0, 0);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
for (int i = 0; i < 6; i++)
glDrawElements(GL_TRIANGLE_FAN, 4, GL_UNSIGNED_INT, (GLvoid*)(i * 4 * sizeof(GLuint)));
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
This code uses the vertex indices for the normals as well. As a result, the normals do not load correctly, and I need separate indices for them. The question is: how can I provide two sets of indices for the VBO, one for the vertices and one for the normals?
The short answer is that you can't. But if you want the long one:
glDrawElements takes index arrays of vertices. A vertex is not just a position; it is the assembly of all attributes at once. When you fetch a vertex by index, that same index is used for every attribute array. The upside of indices is that they enable use of the post-T&L cache (though how well the cache is utilised depends on the actual index values) and reduce memory usage.
If your data is saved in an OBJ-like layout, where the attribute arrays are indexed independently, you have to convert them to a friendlier representation. A simple approach (see the sketch after this list):
Allocate big array[s] that can hold all vertex data even without indices.
For each triangle, fetch the data into the new arrays using your independent indices. As a result, your vertices no longer have independent attribute indices - they don't have indices at all! You could already draw at this point, though, with glDrawArrays().
If you want indices, you can now recreate them. Remove duplicates from the new vertex arrays (duplicates must have the entire vertex equal, not only the position), and for each triangle find the index of each vertex in the new array.
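A minimal sketch of those three steps for position + normal data; the names and the std::map-based deduplication are one possible choice, not the only one:
#include <map>
#include <utility>
#include <vector>
struct Corner { int v, vn; };  // 0-based indices of one face corner (v//vn)
void Reindex(const std::vector<float>& positions,  // 3 floats per vertex
             const std::vector<float>& normals,    // 3 floats per normal
             const std::vector<Corner>& corners,   // corners of the triangulated faces
             std::vector<float>& outInterleaved,   // x,y,z,nx,ny,nz per unique vertex
             std::vector<unsigned>& outIndices) {
    std::map<std::pair<int, int>, unsigned> seen;  // (v, vn) -> new index
    for (const Corner& c : corners) {
        const auto key = std::make_pair(c.v, c.vn);
        auto it = seen.find(key);
        if (it == seen.end()) {
            const unsigned idx = static_cast<unsigned>(seen.size());
            seen.emplace(key, idx);
            for (int k = 0; k < 3; ++k) outInterleaved.push_back(positions[3*c.v + k]);
            for (int k = 0; k < 3; ++k) outInterleaved.push_back(normals[3*c.vn + k]);
            outIndices.push_back(idx);
        } else {
            outIndices.push_back(it->second);
        }
    }
}
After this, one glDrawElements call with a single index buffer (and an interleaved VBO with stride 6 * sizeof(float)) draws the whole mesh.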

WaveFront .obj loader doesn't display the object like it should (VBO & VAO)

I'm working on a WaveFront .obj loader, and my first goal is to get all the vertices and indices loaded from the following .obj file (it displays a cube with curved edges).
# Blender v2.62 (sub 0) OBJ File: 'Cube.blend'
# www.blender.org
o Cube
v 0.900000 -1.000000 -0.900000
v 0.900000 -1.000000 0.900000
v -0.900000 -1.000000 0.900000
v -0.900000 -1.000000 -0.900000
v 0.900000 1.000000 -0.900000
v 0.899999 1.000000 0.900001
v -0.900000 1.000000 0.900000
v -0.900000 1.000000 -0.900000
v 1.000000 -0.900000 -1.000000
v 1.000000 -0.900000 1.000000
v -1.000000 -0.900000 1.000000
v -1.000000 -0.900000 -1.000000
v 1.000000 0.905000 -0.999999
v 0.999999 0.905000 1.000001
v -1.000000 0.905000 1.000000
v -1.000000 0.905000 -1.000000
f 1//1 2//1 3//1
f 1//1 3//1 4//1
f 13//2 9//2 12//2
f 13//2 12//2 16//2
f 5//3 13//3 16//3
f 5//3 16//3 8//3
f 15//4 7//4 8//4
f 15//5 8//5 16//5
f 11//6 15//6 16//6
f 11//6 16//6 12//6
f 14//7 6//7 7//7
f 14//7 7//7 15//7
f 10//8 14//8 11//8
f 14//8 15//8 11//8
f 13//9 5//9 6//9
f 13//9 6//9 14//9
f 9//10 13//10 10//10
f 13//11 14//11 10//11
f 9//12 1//12 4//12
f 9//12 4//12 12//12
f 3//13 11//13 12//13
f 3//14 12//14 4//14
f 2//15 10//15 11//15
f 2//15 11//15 3//15
f 1//16 9//16 10//16
f 1//16 10//16 2//16
f 5//17 8//17 7//17
f 5//17 7//17 6//17
I've loaded all the vertices and indices into two vectors called verticesVec and indicesVec, and I've checked them both (they contain all the input in the correct order; verticesVec[0] = 0.9 and indicesVec[0] = 1). I load the vertices into a vertex buffer object and the indices into an index buffer that is bound in the vertex array object, together with the shader attribute settings.
However, when I render the cube I get the following image, which is not correct. I've rechecked all my code but can't find where I'm going wrong. Does it have something to do with the vectors and their size in bytes?
The following code is from my Draw/Render method that does all the rendering.
glUseProgram(theProgram);
GLuint modelToCameraMatrixUnif = glGetUniformLocation(theProgram, "modelToCameraMatrix");
GLuint perspectiveMatrixUnif = glGetUniformLocation(theProgram, "cameraToClipMatrix");
glUniformMatrix4fv(perspectiveMatrixUnif, 1, GL_FALSE, glm::value_ptr(cameraToClipMatrix));
glm::mat4 identity(1.0f);
glutil::MatrixStack stack(identity); // Load identity Matrix
// Push for Camera Stuff
glutil::PushStack camera(stack);
stack.Translate(glm::vec3(camX, camY, camZ));
//Push one step further to fix object 1
glutil::PushStack push(stack);
stack.Translate(glm::vec3(0.0f, 0.0f, 3.0f));
// Draw Blender object
glBindVertexArray(this->load.vertexArrayOBject);
glUniformMatrix4fv(modelToCameraMatrixUnif, 1, GL_FALSE, glm::value_ptr(stack.Top()));
glDrawElements(GL_TRIANGLES, this->load.indicesVec.size(), GL_UNSIGNED_SHORT, 0);
// Pop camera (call destructor to de-initialize all dynamic memory)
camera.~PushStack();
// Now reset all buffers/programs
glBindVertexArray(0);
glUseProgram(0);
And the initialisation of buffer objects in my .obj loader:
void Loader::generateVertexBufferObjects()
{
// Fills the vertex buffer object with data
glGenBuffers(1, &vertexBufferObject);
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
glBufferData(GL_ARRAY_BUFFER, verticesVec.size() * 4, &verticesVec[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Fills the index buffer object with its data
glGenBuffers(1, &indexBufferObject);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indicesVec.size() * 4, &indicesVec[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
void Loader::generateVertexArrayObjects()
{
glGenVertexArrays(1, &vertexArrayOBject);
glBindVertexArray(vertexArrayOBject);
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferObject);
glBindVertexArray(0);
}
You're creating your indices as unsigned ints, but you're telling OpenGL that they are of type GL_UNSIGNED_SHORT. The type argument to glDrawElements must match the type of the data uploaded to the index buffer.
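Concretely, either of these resolves the mismatch (a sketch against the posted draw call):
// Option 1: keep the unsigned int indices and draw with the matching type.
glDrawElements(GL_TRIANGLES, this->load.indicesVec.size(), GL_UNSIGNED_INT, 0);
// Option 2: store the indices as GLushort instead and keep GL_UNSIGNED_SHORT.
Note also that OBJ face indices are 1-based (the post confirms indicesVec[0] == 1 for the first vertex), while OpenGL expects 0-based indices, so each index also needs a -1 applied during loading.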