Cube's edges in OpenGL - c++

I have drawn a cube in OpenGl to use it as skybox.
I'm using a single texture image and I use the UV coordinates to locate the 6 faces in the right positions.
I send the texture with the following code:
//------------------------------------Luminance--------------------------------------------------------------
// Uploads the three planes of a YUV frame as three separate single-channel
// GL_LUMINANCE textures; the fragment shader recombines them into RGB.
// GL_UNPACK_ROW_LENGTH gives GL the stride (in pixels) of each source row.
glPixelStorei(GL_UNPACK_ROW_LENGTH, m_width);
glBindTexture(GL_TEXTURE_2D, texturePointer[0]);
// Full-resolution Y (luma) plane.
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_width, m_height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_uYdata);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
// CLAMP_TO_EDGE keeps the linear filter from wrapping to the opposite side
// of the texture at the borders.
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//--------------------------------------U component------------------------------------------------------------
// Chroma planes use their own dimensions (m_uvWidth/m_uvHeight), which
// suggests subsampled chroma — confirm against the decoder that fills m_uUdata.
glPixelStorei(GL_UNPACK_ROW_LENGTH, m_uvWidth);
glBindTexture(GL_TEXTURE_2D, texturePointer[1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_uvWidth, m_uvHeight, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_uUdata);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//------------------------------------- V component-------------------------------------------------------------
// GL_UNPACK_ROW_LENGTH from the U upload (m_uvWidth) is still in effect and
// intentionally applies to the V plane as well.
glBindTexture(GL_TEXTURE_2D, texturePointer[2]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_uvWidth, m_uvHeight, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_uVdata);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// NOTE(review): GL_UNPACK_ROW_LENGTH is left set to m_uvWidth after this code;
// reset it to 0 if later uploads assume tightly packed rows — TODO confirm.
I can't remove the white edges between the faces of the cube, see the picture below:
Using GL_NEAREST removes the problem, but the quality of the image is lower;
how can I use GL_LINEAR and remove this effect?
Edit:
The cube is created using the following Tutorial:
Model loading
Considering that the texture is made by six faces stitched in columns.
The actual Blender object is:
# Blender v2.77 (sub 0) OBJ File: 'demo_cube.blend'
# www.blender.org
o Cube
v 1.000000 -1.000000 -1.000000
v 1.0000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -1.000000
v 1.0000000 1.000000 1.000000
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
vt 0.833333333 0.0000
vt 0.666666666 1.0000
vt 0.833333333 1.0000
vt 0.833333333 0.0000
vt 1.000000000 1.0000
vt 1.000000000 0.0000
vt 0.333333333 1.0000
vt 0.500000000 0.0000
vt 0.333333333 0.0000
vt 0.500000000 1.0000
vt 0.666666666 0.0000
vt 0.500000000 0.0000
vt 0.000000000 0.0000
vt 0.166666666 1.0000
vt 0.166666666 0.0000
vt 0.333333333 0.0000
vt 0.166666666 1.0000
vt 0.333333333 1.0000
vt 0.666666666 0.0000
vt 0.833333333 1.0000
vt 0.500000000 1.0000
vt 0.666666666 1.0000
vt 0.000000000 1.0000
vt 0.166666666 0.0000
s off
f 2/1 4/2 1/3
f 8/4 6/5 5/6
f 5/7 2/8 1/9
f 6/10 3/11 2/12
f 3/13 8/14 4/15
f 1/16 8/17 5/18
f 2/1 3/19 4/2
f 8/4 7/20 6/5
f 5/7 6/21 2/8
f 6/10 7/22 3/11
f 3/13 7/23 8/14
f 1/16 4/24 8/17
//44 and 47
The fragment shader is:
#version 330 core
// Samples the three Y/U/V planes at the same coordinate and converts to RGB.
in vec2 oTexCoord;
uniform sampler2D yChannel;
uniform sampler2D uChannel;
uniform sampler2D vChannel;
out vec4 color;
// 0.0625 = 16/256 (video-range luma offset); 0.5 recenters the chroma planes.
const vec3 offset = vec3(0.0625, 0.5, 0.5);
// GLSL mat3 constructors fill column-major. Below the vector is multiplied
// on the LEFT (yuv * coef), which dots yuv with each column — so each
// 3-value group here is one output row of the YUV->RGB conversion matrix.
const mat3 coef = mat3(
1.164, 0.0, 1.793,
1.164, -0.213, -0.533,
1.164, 2.112, 0.0
);
void main()
{
// Flip V: image rows are stored top-to-bottom.
vec2 nTexCoord = vec2(oTexCoord.x, 1.0 - oTexCoord.y);
// Fallback is opaque white for coordinates outside the open (0,1) range.
vec4 Tcolor = vec4(1.0);
if(oTexCoord.y <1 && oTexCoord.y > 0.0) {
if(oTexCoord.x < 1 && oTexCoord.x > 0.0) {
vec3 yuv = vec3(
texture(yChannel, nTexCoord).r,
texture(uChannel, nTexCoord).r,
texture(vChannel, nTexCoord).r
) - offset;
// Row-vector * matrix: equivalent to transpose(coef) * yuv.
vec3 rgb = yuv * coef;
Tcolor = vec4(rgb, 1.0);
}
}
color = Tcolor;
}

I think you want to use cubemaps and GL_ARB_seamless_cube_map:
Although it is unlikely that the generated ( s t ) coordinate lies significantly outside the determined cube map face, it is often the case that the locations of the individual elements required during a linear sampling do not lie within the determined face, and their coordinates will therefore be modified by the selected clamping and wrapping rules. This often has the effect of producing seams or other discontinuities in the sampled texture.
This extension allows implementations to take samples from adjacent cube map faces, providing the ability to create seamless cube maps.
(emph. mine).
To use it, first check that you indeed have that extension (this depends on your platform and on which toolkit is eventually helping you into accessing OpenGL), then just write
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
to enable it (globally).

Related

OpenGL: skybox/cubemap is displayed partially, triangles of it are missing

I tried to create a textured skybox, following this tutorial:
cubemaps. Some triangles of the box (it is triangulated) are not shown at all, but weirdly enough not every second triangle of a square is missing (1 square's texturing is correct). My problem can be seen in the pictures below. The grayish color is "general" color of the scene, it has nothing to do with the skybox.
I believe the problem is caused by one of the shaders, as I get an
invalid operation
error, while trying to use the shaders of the skybox. What might be the problem?
Here the code of vertex shader:
#version 330 core
// Skybox vertex shader: the cube's object-space position doubles as the
// cubemap lookup direction passed to the fragment shader.
layout (location = 0) in vec3 aPos;
out vec3 TexCoords;
uniform mat4 projection;
uniform mat4 view;
void main() {
TexCoords = aPos;
vec4 pos = projection * view * vec4(aPos, 1.0);
// Swizzle z to w so depth becomes w/w == 1.0 after the perspective divide,
// pushing the skybox to the far plane (assumes a GL_LEQUAL depth test —
// confirm in the render-state setup).
gl_Position = pos.xyww;
}
and the fragment shader:
#version 330 core
// Skybox fragment shader: sample the cubemap along the interpolated
// object-space direction.
out vec4 FragColor;
in vec3 TexCoords;
uniform samplerCube skybox;
void main() {
FragColor = texture(skybox, TexCoords);
}
and finally the procedure which loads the cubemap:
// Loads six face images into a GL_TEXTURE_CUBE_MAP and returns the texture id.
// `faces` holds the image paths ordered +X, -X, +Y, -Y, +Z, -Z (matching the
// GL_TEXTURE_CUBE_MAP_POSITIVE_X + i enum sequence). Faces that fail to load
// are reported on stdout and skipped.
GLuint loadCubemap(std::vector<std::string> faces) {
GLuint textureID;
glGenTextures(1, &textureID);
glBindTexture(GL_TEXTURE_CUBE_MAP, textureID);
// Rows of 1- or 3-channel images are often not 4-byte aligned; without this,
// any face whose width*channels is not a multiple of 4 uploads skewed.
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
int width, height, nrChannels;
for (GLuint i = 0; i < faces.size(); i++) {
unsigned char *data = stbi_load(faces[i].c_str(), &width, &height, &nrChannels, 0);
if (data) {
// Derive the pixel format from what stbi actually decoded instead of
// assuming GL_RGB: a 4-channel PNG uploaded as GL_RGB reads garbage.
GLenum format = (nrChannels == 4) ? GL_RGBA
: (nrChannels == 1) ? GL_RED
: GL_RGB;
glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i,
0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
stbi_image_free(data);
} else {
// data is null here — nothing to free.
std::cout << "Cubemap texture failed to load at path: " << faces[i] << std::endl;
}
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// Clamp all three axes so linear filtering never wraps across a face seam.
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
return textureID;
}
The requested geometry (it's a .obj file):
o Cube
v 300.000000 -300.000000 -300.000000
v 300.000000 -300.000000 300.000000
v -300.00000 -300.000000 300.000000
v -300.00000 -300.000000 -300.00000
v 300.000000 300.000000 -300.000000
v 300.000000 300.000000 300.00000
v -300.000000 300.000000 300.000000
v -300.000000 300.000000 -300.000000
vn 0.0000 -1.0000 0.0000
vn 0.0000 1.0000 0.0000
vn 1.0000 -0.0000 0.0000
vn -0.0000 -0.0000 1.0000
vn -1.0000 -0.0000 -0.0000
vn 0.0000 0.0000 -1.0000
s off
f 2//1 4//1 1//1
f 8//2 6//2 5//2
f 5//3 2//3 1//3
f 6//4 3//4 2//4
f 3//5 8//5 4//5
f 1//6 8//6 5//6
f 2//1 3//1 4//1
f 8//2 7//2 6//2
f 5//3 6//3 2//3
f 6//4 7//4 3//4
f 3//5 7//5 8//5
f 1//6 4//6 8//6
I have found the problem! It was not caused by the shaders but by my own stupidity. In the main rendering loop, I forgot to remove an extra binding VAO-buffer + drawing triangles pair after the newly inserted one.

How to properly upscale a texture in opengl?

Let's say, for the simplicity of this question, I want to create a texture containing a color gradient from black to red. I know there are much easier ways to achieve this but it is a good example for my problem.
I figured, i could simply create a texture from an float array like so:
// 2x2 single-channel texture: each row is [0, 1], i.e. a horizontal ramp.
float values[4] {
0.f, 1.f,
0.f, 1.f
}
// gen texture
unsigned int texId;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
// set filter and wrap
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
// NOTE(review): with GL_REPEAT, border fragments are interpolated against
// texels from the opposite edge — the cause of the 0.25..0.75 effect
// described below.
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// copy values
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, 2, 2, 0, GL_RED, GL_FLOAT, values);
And draw it:
// Fullscreen quad, interleaved as x, y, u, v per vertex.
// NOTE(review): transcription artifacts in this quoted snippet — `vertices[8]`
// holds 16 floats, and `unsigned int[6] indices` is not valid C++ syntax;
// presumably `float vertices[16]` and `unsigned int indices[6]` were meant.
float vertices[8] {
-1.f, -1.f, 0.f, 0.f,
1.f, -1.f, 1.f, 0.f,
1.f, 1.f, 1.f, 1.f,
-1.f, 1.f, 0.f, 1.f
}
// generate vertex buffer, vertex buffer layout and vertex array
// Two triangles covering the quad. NOTE(review): the second triangle is
// 2,3,1 rather than the conventional 2,3,0 — verify against the output.
unsigned int[6] indices {
0, 1, 2,
2, 3, 1
}
// generate index buffer
// bind everything
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr);
Where the bound vertex shader looks like this:
#version 330 core
layout(location=0) in vec2 in_position;
layout(location=1) in vec2 in_texCoords;
out vec2 texCoords;
void main() {
gl_Position = vec4(in_position, 0.f, 1.f);
texCoords = in_texCoords;
}
And the fragment shader like this:
#version 330 core
layout(location = 0) out vec4 out_color;
in vec2 texCoords;
uniform sampler2D u_gradient;
void main() {
float red = texture(u_gradient, texCoords).r;
out_color = vec4(red, 0.f, 0.f, 1.f);
}
Following this I'd expect to get an color gradient from left to right (black to red) across my entire window.
However what I get is a gradient from about 1/4 to 3/4 of the window in x and y direction (See image below). I've also noted that the wrap of repeat does not seem to apply here as what I get looks like mirror-repeat.
Result:
I've played around with the fragment shader using fixed values instead of texCoords and figured that a range from .25 to .75 on the x axis represents the entire gradient (.25 maps to 0 for red and .75 to 1).
Changing the amount of 'steps' passed as values to the texture (eg. a 4x4 array) did not change the results.
I've also tried using an image as texture (loaded with stb_image, resolution 1920x1080) which works perfectly fine and spreads across the entire screen.
For clarification: Why do the texCoords for the gradient range from .25 to .75 and not from 0 to 1 and how can I fix it (besides adjusting the texCoords itself)?
Lets assume you have a 2x2 texture
2x2 Texture
If this texture is wrapped onto a grid of 6x6 fragments, then the center of each texel falls exactly on the fragment in the middle of each 3x3 tile of the 6x6 square:
6x6 quad
The color of the other fragments depends on the texture parameters — see glTexParameter.
Since the texture is magnified, the GL_TEXTURE_MAG_FILTER is significant.
If it is GL_NEAREST, then the color of the fragments is that one of the closest texel, to the texture coordinates of the fragment:
If it is GL_LINEAR, then the color is interpolated, by the weighted average of the 4 pixels which are closest to the texture coordinates.
The interpolation at the borders of the 6x6 quad depends on the wrap parameters GL_TEXTURE_WRAP_S and GL_TEXTURE_WRAP_T.
If the parameters are GL_REPEAT (which is the default), then the texture is treated as an endless texture, and the interpolation of the color at the borders takes into account the texels on the opposite side of the texture. This is used for seamless textures and tiling:
If it is GL_CLAMP_TO_EDGE, then the interpolated color at the borders is clamped to the color of the texels at the border of the texture:
Apply the texture wrap parameter GL_CLAMP_TO_EDGE to the texture object,
// Same 2x2 gradient texture as in the question, but wrapped with
// GL_CLAMP_TO_EDGE so border fragments are clamped to the edge texels
// instead of interpolating against the opposite side of the texture.
float values[4] { 0.f, 1.f, 0.f, 1.f };
unsigned int texId;
glGenTextures(1, &texId);
glBindTexture(GL_TEXTURE_2D, texId);
// set filter and wrap
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
// copy values
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32F, 2, 2, 0, GL_RED, GL_FLOAT, values);
to get the following gradient:

Why is my texture loaded into opengl not mapped correctly?

I am trying to load a simple model in openGL. Currently, I have a problem with the textures. The texture definitely does show up, but it is messed up and it also does not cover all the model (part of the model is black). I have tried several things to determine what is the source of the problem. I passed in a uniform red texture and it renders correctly. I used the texture coord as R and G value in the fragment shader and I get a red-green model so I assume the texture coordinate is also fine.
The texture is messed up, and part of it is black. This is just a simple minecraft character
the model's texture, which is a png
Here's how I am creating texture:
imageData = stbi_load(path.c_str(), &width, &height, &colorFormat, 0);
glGenTextures(1, &texID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texID);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
My fragment shader:
// Diffuse + ambient shading in eye space; the texture (when present)
// supplies the diffuse/ambient base color, otherwise material KD/KA is used.
void main(){
// Direction from the fragment to the light (both in eye coordinates).
vec4 l = normalize(lightSourcePosition - positionEyeCoord);
vec4 normalFrag = normalize(normalEyeCoord);
if (hasTexture == 1){
vec4 textureKD = vec4(texture(textureSampler, texCoord).rgba);
// max(0, N.L) clamps back-facing contributions to zero.
vec4 diffuse = vec4(lightIntensity, 0.0) * textureKD * max(0.0, dot(l, normalFrag));
vec4 ambient = vec4(ambientIntensity, 0.0) * textureKD;
gl_FragColor = ambient + diffuse;
}
else {
// Untextured path: material diffuse (KD) and ambient (KA) coefficients.
vec4 diffuse = vec4(lightIntensity * KD, 0.0) * max(0.0, dot(l, normalFrag));
vec4 ambient = vec4(ambientIntensity * KA, 0.0);
gl_FragColor = ambient + diffuse;
}
}
Most likely, your character has been modeled using DirectX conventions for texturing. In DirectX, the texture coordinate origin is the top left of the image, whereas it is the bottom left corner in OpenGL.
There are a couple of things you can do (choose one):
When you load the model, replace all texture coordinates (u, v) by (u, 1 - v).
When you load the texture, flip the image vertically
In your shader, use vec2(texCoord.x, 1 - texCoord.y) for your texture coordinates.
I advise against using the third option for anything other than quick testing.

Trouble when using OpenGL .obj normals

I'm trying to render a cube using an exported.obj file by Blender but the lighting looks like wrong.
I think it's because my vertices array contains 8 values but the normals array has only 6.
I can not understand how OpenGL uses this index array. It's a little bit magic for me.
Can anyone help me?
This is the file:
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 1.000000 0.999999
v -0.999999 1.000000 -1.000001
v 1.000000 1.000000 -1.000000
v 1.000000 1.000000 1.000000
vn 0.0000 -1.0000 0.0000
vn 0.0000 1.0000 -0.0000
vn -1.0000 -0.0000 0.0000
vn 0.0000 -0.0000 -1.0000
vn 1.0000 -0.0000 0.0000
vn -0.0000 0.0000 1.0000
f 2//1 4//1 1//1
f 8//2 6//2 5//2
f 5//3 2//3 1//3
f 6//4 3//4 2//4
f 3//5 8//5 4//5
f 1//6 8//6 5//6
f 2//1 3//1 4//1
f 8//2 7//2 6//2
f 5//3 6//3 2//3
f 6//4 7//4 3//4
f 3//5 7//5 8//5
f 1//6 4//6 8//6
This is my code:
// One VAO with three attribute VBOs (positions, colors, normals) plus an
// index buffer, then an indexed draw of the 36 cube indices.
GLuint cubeVAO, cubeVerticesVBO, cubeColorsVBO, cubeNormalsVBO, cubeIndicesVBO;
glGenVertexArrays(1, &cubeVAO);
glBindVertexArray(cubeVAO);
// Attribute 0: positions, 3 floats per vertex, tightly packed.
glGenBuffers(1, &cubeVerticesVBO);
glBindBuffer(GL_ARRAY_BUFFER, cubeVerticesVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_vertices), cube_vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), BUFFER_OFFSET(0));
glEnableVertexAttribArray(0);
// Attribute 1: per-vertex colors.
glGenBuffers(1, &cubeColorsVBO);
glBindBuffer(GL_ARRAY_BUFFER, cubeColorsVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_colors), cube_colors, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), BUFFER_OFFSET(0));
glEnableVertexAttribArray(1);
// Attribute 2: normals. NOTE(review): glDrawElements uses ONE index per
// vertex for ALL attributes, so the OBJ's 6 normals cannot be indexed
// independently of the 8 positions — the data must first be expanded into
// unique position+normal combinations.
glGenBuffers(1, &cubeNormalsVBO);
glBindBuffer(GL_ARRAY_BUFFER, cubeNormalsVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_normals), cube_normals, GL_STATIC_DRAW);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), BUFFER_OFFSET(0));
glEnableVertexAttribArray(2);
glGenBuffers(1, &cubeIndicesVBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cubeIndicesVBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(cube_indices), cube_indices, GL_STATIC_DRAW);
glBindVertexArray(cubeVAO);
// Upload transforms and lighting uniforms, then draw 36 indexed vertices.
cubeShader.use();
glm::mat4 model;
cubeShader.setMat4("model", model);
cubeShader.setMat4("view", view);
cubeShader.setMat4("projection", projection);
cubeShader.setVec3("lampColor", lampColor);
cubeShader.setVec3("lampPos", lampPos);
cubeShader.setVec3("viewPos", viewPos);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);
Vertex Shader
#version 330 core
// Forwards color and normal; computes world-space position for lighting.
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec3 normal;
out vec3 Color;
out vec3 Normal;
out vec3 FragPos;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
vec4 posv4 = vec4(position, 1.0f);
Color = color;
// World-space fragment position used by the lighting fragment shader.
FragPos = vec3(model * posv4);
// NOTE(review): the normal is passed through in object space; if `model`
// ever rotates or non-uniformly scales, this should be
// mat3(transpose(inverse(model))) * normal instead — TODO confirm.
Normal = normal;
gl_Position = projection*view*model*posv4;
}
Fragment Shader:
#version 330 core
// Classic Phong shading: ambient + diffuse + specular, modulated by the
// interpolated per-vertex Color.
in vec3 Color;
in vec3 Normal;
in vec3 FragPos;
out vec4 FragColor;
uniform vec3 lampColor;
uniform vec3 lampPos;
uniform vec3 viewPos;
void main()
{
// ambient
float ambientStrength = 0.1;
vec3 ambient = ambientStrength * lampColor;
// diffuse
vec3 norm = normalize(Normal);
vec3 lightDir = normalize(lampPos - FragPos);
// Clamp N.L so surfaces facing away from the light get no diffuse term.
float diff = max(dot(norm, lightDir), 0.0);
vec3 diffuse = diff * lampColor;
// specular
float specularStrength = 0.5;
vec3 viewDir = normalize(viewPos - FragPos);
vec3 reflectDir = reflect(-lightDir, norm);
// Shininess exponent 32 controls the highlight tightness.
float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32);
vec3 specular = specularStrength * spec * lampColor;
vec3 result = (ambient + diffuse + specular) * Color;
FragColor = vec4(result, 1.0);
}
What's wrong? ,-,
Demonstration video:
https://vid.me/vy17h
You'll have to expand the data in the OBJ file. OpenGL expects a normal to be specified for each vertex, individually. In the OBJ file vertices and normals are treated separately. What you have to do is find the unique vertex+normal combinations, store that and index into that new vertex+normal array instead of using separate indices.
With modern OpenGL it is somewhat possible to use different indices for each vertex attribute by making use of vertex shader texture fetch, storing vertex data in textures. But this comes with a performance hit.
The normals array only has 6 values because the cube has 6 faces and you have created a model where each face has 4 normals at the corners pointing in the same direction (ie are identical) - which explains the lighting not looking how you expect.
The obj file uses indexing so it can assign one of these 6 unique values to each of the 8 vertices

glDrawElement crashes using GLSL shaders

I coded a simple program using GLSL which must display a simple textured Box. To do this I load an OBJ file called 'Box.mesh' and next I initialize VBOs for the vertex, normal, texture and index buffer.
Here's the Box.mesh file content :
o Cube
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -0.999999
v 0.999999 1.000000 1.000001
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
vt 0.626059 0.265705
vt 0.626059 0.487398
vt 0.404365 0.487398
vt 0.626060 0.930786
vt 0.404365 0.930786
vt 0.404365 0.709091
vt 0.847752 0.487397
vt 0.847753 0.709091
vt 0.626059 0.709091
vt 0.182672 0.487397
vt 0.626059 0.044011
vt 0.404366 0.265704
vt 0.182671 0.709091
vt 0.404366 0.044011
vn 0.000000 -1.000000 0.000000
vn -0.000000 1.000000 0.000000
vn 1.000000 -0.000000 0.000001
vn -0.000000 -0.000000 1.000000
vn -1.000000 -0.000000 -0.000000
vn 0.000000 0.000000 -1.000000
vn 1.000000 0.000000 -0.000000
usemtl BoxMtl
s off
f 1/1/1 2/2/1 3/3/1
f 5/4/2 8/5/2 7/6/2
f 1/7/3 5/8/3 6/9/3
f 2/2/4 6/9/4 3/3/4
f 3/3/5 7/6/5 4/10/5
f 5/11/6 1/1/6 4/12/6
f 4/12/1 1/1/1 3/3/1
f 6/9/2 5/4/2 7/6/2
f 2/2/7 1/7/7 6/9/7
f 6/9/4 7/6/4 3/3/4
f 7/6/5 8/13/5 4/10/5
f 8/14/6 5/11/6 4/12/6
And a piece of code of my program :
// Turns a byte offset into the pointer-typed last argument that
// glVertexAttribPointer/glDrawElements expect when a buffer is bound.
#define OFFSET_BUFFER(offset) ((char*)NULL + (offset))
[...]
//VBO Initialization
Basic::OBJReader objReader;
Basic::OBJImage objImg = objReader.Parse("Box.mesh");
// handle[0..2]: position/normal/texcoord VBOs; handle[3]: index buffer.
GLuint handle[4];
glGenBuffers(1, handle);
std::vector<float> position = objImg.GetVertexPosition();
glBindBuffer(GL_ARRAY_BUFFER, handle[0]);
glBufferData(GL_ARRAY_BUFFER, position.size() * sizeof(GLfloat), &position[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
std::vector<float> normal = objImg.GetVertexNormal();
glBindBuffer(GL_ARRAY_BUFFER, handle[1]);
glBufferData(GL_ARRAY_BUFFER, normal.size() * sizeof(GLfloat), &normal[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
std::vector<float> texture = objImg.GetVertexTexture();
glBindBuffer(GL_ARRAY_BUFFER, handle[2]);
glBufferData(GL_ARRAY_BUFFER, texture.size() * sizeof(GLfloat), &texture[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
std::vector<unsigned int> faces = objImg.GetOBJFaceImageList().at(0).GetFaceData();
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle[3]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, faces.size() * sizeof(GLuint), &faces[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
[...]
/*Main loop*/
while (isAlive == true)
{
[...]
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, handle[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, handle[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, handle[2]);
glBindTexture(GL_TEXTURE_2D, textureID);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
// NOTE(review): bug — the index buffer must be bound to
// GL_ELEMENT_ARRAY_BUFFER, not GL_ARRAY_BUFFER. With no element array
// buffer bound, glDrawElements treats its last argument as a client-memory
// pointer and dereferences NULL, which is the crash reported here.
glBindBuffer(GL_ARRAY_BUFFER, handle[3]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(0);
[...]
}
I checked the content and the size of all the buffers and they are correct. So I don't understand why I have a crash at the first call of glDrawElements. I'm really lost. Does anyone can help me, please? Thanks a lot in advance for your help.
Assuming handle[3] is your element array buffer, this is wrong prior to calling glDrawElements (...):
glBindBuffer(GL_ARRAY_BUFFER, handle[3]);
It needs to be:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle[3]);
Presumably you have no element array buffer bound, and this instructs OpenGL that the final parameter in glDrawElements (...) is a pointer to client memory (instead of an offset into a buffer object's memory). GL will attempt to dereference a NULL pointer when it comes time to pull the vertex indices and then this happens.