glDrawElements crashes when using GLSL shaders - opengl

I wrote a simple program using GLSL that should display a simple textured box. To do this I load an OBJ file called 'Box.mesh' and then initialize VBOs for the vertex, normal, texture and index buffers.
Here is the content of the Box.mesh file:
o Cube
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -0.999999
v 0.999999 1.000000 1.000001
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
vt 0.626059 0.265705
vt 0.626059 0.487398
vt 0.404365 0.487398
vt 0.626060 0.930786
vt 0.404365 0.930786
vt 0.404365 0.709091
vt 0.847752 0.487397
vt 0.847753 0.709091
vt 0.626059 0.709091
vt 0.182672 0.487397
vt 0.626059 0.044011
vt 0.404366 0.265704
vt 0.182671 0.709091
vt 0.404366 0.044011
vn 0.000000 -1.000000 0.000000
vn -0.000000 1.000000 0.000000
vn 1.000000 -0.000000 0.000001
vn -0.000000 -0.000000 1.000000
vn -1.000000 -0.000000 -0.000000
vn 0.000000 0.000000 -1.000000
vn 1.000000 0.000000 -0.000000
usemtl BoxMtl
s off
f 1/1/1 2/2/1 3/3/1
f 5/4/2 8/5/2 7/6/2
f 1/7/3 5/8/3 6/9/3
f 2/2/4 6/9/4 3/3/4
f 3/3/5 7/6/5 4/10/5
f 5/11/6 1/1/6 4/12/6
f 4/12/1 1/1/1 3/3/1
f 6/9/2 5/4/2 7/6/2
f 2/2/7 1/7/7 6/9/7
f 6/9/4 7/6/4 3/3/4
f 7/6/5 8/13/5 4/10/5
f 8/14/6 5/11/6 4/12/6
And here is the relevant piece of code from my program:
#define OFFSET_BUFFER(offset) ((char*)NULL + (offset))
[...]
//VBO Initialization
Basic::OBJReader objReader;
Basic::OBJImage objImg = objReader.Parse("Box.mesh");
GLuint handle[4];
glGenBuffers(1, handle);
std::vector<float> position = objImg.GetVertexPosition();
glBindBuffer(GL_ARRAY_BUFFER, handle[0]);
glBufferData(GL_ARRAY_BUFFER, position.size() * sizeof(GLfloat), &position[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
std::vector<float> normal = objImg.GetVertexNormal();
glBindBuffer(GL_ARRAY_BUFFER, handle[1]);
glBufferData(GL_ARRAY_BUFFER, normal.size() * sizeof(GLfloat), &normal[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
std::vector<float> texture = objImg.GetVertexTexture();
glBindBuffer(GL_ARRAY_BUFFER, handle[2]);
glBufferData(GL_ARRAY_BUFFER, texture.size() * sizeof(GLfloat), &texture[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
std::vector<unsigned int> faces = objImg.GetOBJFaceImageList().at(0).GetFaceData();
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle[3]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, faces.size() * sizeof(GLuint), &faces[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
[...]
/*Main loop*/
while (isAlive == true)
{
[...]
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, handle[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, handle[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, handle[2]);
glBindTexture(GL_TEXTURE_2D, textureID);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, handle[3]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(0);
[...]
}
I checked the content and the size of all the buffers and they are correct, so I don't understand why there is a crash at the first call of glDrawElements. I'm really lost. Can anyone help me, please? Thanks a lot in advance for your help.

Assuming handle[3] is your element array buffer, this is wrong prior to calling glDrawElements (...):
glBindBuffer(GL_ARRAY_BUFFER, handle[3]);
It needs to be:
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle[3]);
Presumably you have no element array buffer bound, and this instructs OpenGL that the final parameter of glDrawElements (...) is a pointer to client memory (instead of an offset into a buffer object's memory). GL will attempt to dereference a NULL pointer when it comes time to pull the vertex indices, and that is what causes the crash.
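As a minimal sketch (reusing the OFFSET_BUFFER macro and handle[] names from the question, and showing only attribute 0 for brevity), the draw part of the loop would become:
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, handle[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, OFFSET_BUFFER(0));
glBindBuffer(GL_ARRAY_BUFFER, 0);
// Bind the index buffer to GL_ELEMENT_ARRAY_BUFFER so the last argument of
// glDrawElements is read as an offset into handle[3], not a client pointer.
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, handle[3]);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, OFFSET_BUFFER(0));
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(0);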

Related

Using VBO to draw the cube by loading wavefront object

I am required to use a VBO to draw a cube loaded from a Wavefront object, by drawing the triangles.
Here is the object:
v -1.000000 -1.000000 1.000000
v -1.000000 1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v -1.000000 1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v 1.000000 1.000000 1.000000
v 1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -1.000000
vn -1.0000 0.0000 0.0000
vn 0.0000 0.0000 -1.0000
vn 1.0000 0.0000 0.0000
vn 0.0000 0.0000 1.0000
vn 0.0000 -1.0000 0.0000
vn 0.0000 1.0000 0.0000
f 2//1 3//1 1//1
f 4//2 7//2 3//2
f 8//3 5//3 7//3
f 6//4 1//4 5//4
f 7//5 1//5 3//5
f 4//6 6//6 8//6
f 2//1 4//1 3//1
f 4//2 8//2 7//2
f 8//3 6//3 5//3
f 6//4 2//4 1//4
f 7//5 5//5 1//5
f 4//6 2//6 6//6
I also have a colour array that I filled in myself, which is 3 values times the number of faces, i.e. 36 values:
float colours[] = {
0.583f, 0.771f, 0.014f,
0.609f, 0.115f, 0.436f,
0.327f, 0.483f, 0.844f,
0.822f, 0.569f, 0.201f,
0.435f, 0.602f, 0.223f,
0.310f, 0.747f, 0.185f,
0.597f, 0.770f, 0.761f,
0.559f, 0.436f, 0.730f,
0.359f, 0.583f, 0.152f,
0.483f, 0.596f, 0.789f,
0.559f, 0.861f, 0.639f,
0.195f, 0.548f, 0.859f,
};
I store my vertex, normal and colour information in a single array which is bound to the VBO:
float* vbo = new float[78];
which is laid out in the order vertices | normals | colours, i.e. 24 | 18 | 36 floats (78 in total). I have tested it and it works well.
I use another array to store the vertex index information, which is 36 indices; for example, for f 2//1 3//1 1//1 I store 2, 3, 1 in my array:
int *element = new int[36];
I have tested it and it works well. I initialize my VBO as follows:
static void init(void){
.......
GLuint vbo_id, index_id;
glGenBuffers(1,&vbo_id);
glGenBuffers(1,&index_id);
glBindBuffer(GL_ARRAY_BUFFER,vbo_id);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,index_id);
glBufferData(GL_ARRAY_BUFFER,sizeof(vbo),vbo,GL_STATIC_DRAW);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,sizeof(element),element,GL_STATIC_DRAW);
}
and my drawing is here
static void display(void){
.......
glBindBuffer(GL_ARRAY_BUFFER,vbo_id);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,index_id);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glVertexPointer(3,GL_FLOAT,0,0);
glNormalPointer(GL_FLOAT,0,(void*)(24*sizeof(float)));//binding
glColorPointer(3,GL_FLOAT,0,(void*)(42*sizeof(float)));
for(int i=0;i<12;i++){
glBegin(GL_TRIANGLES);
glArrayElement(element[i]);
glArrayElement(element[i+1]);
glArrayElement(element[i+2]);
glEnd();
}
/* I have tried this too:
glDrawElements(GL_TRIANGLES,3,GL_INT,0) */
glDisableClientState(GL_COLOR_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER,0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER,0);
}
However, nothing shows on the screen. Is my binding method correct? My instructor tells me that when I draw with indices, if I bind the vertices, normals and colours correctly, they will automatically be matched up when drawing by vertex index.
The type of vbo is float*, so sizeof(vbo) is not the size of the array that vbo points to, but the size of the pointer itself. See sizeof. Note, the 2nd parameter of glBufferData has to be the size of the buffer in bytes.
The size of float* vbo = new float[78] is 78 * sizeof(float) or 78 * sizeof(*vbo)
and the size of int *element = new int[36] is 36 * sizeof(int) or 36 * sizeof(*element)
But sizeof(vbo) is the size of the pointer to the array or the same as sizeof(float*).
This means you have to change the code like this:
glBufferData(GL_ARRAY_BUFFER, 78*sizeof(float), vbo, GL_STATIC_DRAW);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 36*sizeof(int), element, GL_STATIC_DRAW);
The 2nd parameter of glDrawElements has to be the number of indices and the 3rd parameter has to be GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, or GL_UNSIGNED_INT, depending on the data type of the indices:
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, NULL);
Note, you should prefer to use unsigned int * instead of int* for the data type of the index array (element).
Your assumption is wrong. You can't mix 24 vertex coordinates, 18 normal vectors and 36 colors, with different arrays of indices, directly in one Vertex Array Object.
The vertex attributes for each vertex position form a set of data. This means you have to create tuples of vertex coordinate, normal vector and color.
See further Rendering meshes with multiple indices
I recommend using std::vector and doing it something like this:
#include <vector>
// 8 vertex coordinates: 8 * 3 float
std::vector<float> v{
-1.0f, -1.0f, 1.0f,
-1.0f, 1.0f, 1.0f,
-1.0f, -1.0f, -1.0f,
-1.0f, 1.0f, -1.0f,
1.0f, -1.0f, 1.0f,
1.0f, 1.0f, 1.0f,
1.0f, -1.0f, -1.0f,
1.0f, 1.0f, -1.0f };
// 6 normal vectors: 6 * 3 float
std::vector<float> nv{
-1.0f, 0.0f, 0.0f,
0.0f, 0.0f, -1.0f,
1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f,
0.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f };
// 12 colors: 12 * 3 float
std::vector<float> c{
0.583f, 0.771f, 0.014f,
0.609f, 0.115f, 0.436f,
0.327f, 0.483f, 0.844f,
0.822f, 0.569f, 0.201f,
0.435f, 0.602f, 0.223f,
0.310f, 0.747f, 0.185f,
0.597f, 0.770f, 0.761f,
0.559f, 0.436f, 0.730f,
0.359f, 0.583f, 0.152f,
0.483f, 0.596f, 0.789f,
0.559f, 0.861f, 0.639f,
0.195f, 0.548f, 0.859f };
// 12 faces 3*2 indices/face: 12 * 3 * 2 unsigned int
std::vector<unsigned int> indices{
2, 1, 3, 1, 1, 1,
4, 2, 7, 2, 3, 2,
8, 3, 5, 3, 7, 3,
6, 4, 1, 4, 5, 4,
7, 5, 1, 5, 3, 5,
4, 6, 6, 6, 8, 6,
2, 1, 4, 1, 3, 1,
4, 2, 8, 2, 7, 2,
8, 3, 6, 3, 5, 3,
6, 4, 2, 4, 1, 4,
7, 5, 5, 5, 1, 5,
4, 6, 2, 6, 6, 6 };
Create the vertex array data
// final vertex attributes 12 * 3 *(3 + 3 + 3) floats
// x0 y0 z0 nvx0 nvy0 nvz0 cr0 cg0 cb0
// x1 y1 z1 nvx1 nvy1 nvz1 cr1 cg1 cb1
std::vector<float> va;
const unsigned int no_of_faces = 12;
for (unsigned int f=0; f<no_of_faces; ++f )
{
for (unsigned int t=0; t<3; ++t )
{
unsigned int vi = indices[(f*3+t)*2]-1; // vertex index
unsigned int ni = indices[(f*3+t)*2+1]-1; // normal vector index
unsigned int ci = f; // color index
va.insert(va.end(), v.begin() + vi*3, v.begin() + vi*3+3); // insert vertex coordinate
va.insert(va.end(), nv.begin() + ni*3, nv.begin() + ni*3+3); // insert normal vector
va.insert(va.end(), c.begin() + ci*3, c.begin() + ci*3+3); // insert color
}
}
Create the Vertex Buffer Object:
GLuint vbo;
glGenBuffers( 1, &vbo );
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glBufferData( GL_ARRAY_BUFFER, va.size()*sizeof(*va.data()), va.data(), GL_STATIC_DRAW );
Define the arrays of vertex attribute data:
glVertexPointer( 3, GL_FLOAT, 9*sizeof(*va.data()), 0 );
glEnableClientState( GL_VERTEX_ARRAY );
glNormalPointer( GL_FLOAT, 9*sizeof(*va.data()), (void*)(3*sizeof(*va.data())) );
glEnableClientState( GL_NORMAL_ARRAY );
glColorPointer( 3, GL_FLOAT, 9*sizeof(*va.data()), (void*)(6*sizeof(*va.data())) );
glEnableClientState( GL_COLOR_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
Draw the array:
glDrawArrays( GL_TRIANGLES, 0, 36 );
Preview:

Trouble when using OpenGL .obj normals

I'm trying to render a cube using a .obj file exported from Blender, but the lighting looks wrong.
I think it's because my vertices array contains 8 entries while the normals array has only 6.
I cannot understand how OpenGL uses this index array; it's a bit like magic to me.
Can anyone help me?
This is the file:
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 -1.000000
v 1.000000 -1.000000 1.000000
v -1.000000 1.000000 0.999999
v -0.999999 1.000000 -1.000001
v 1.000000 1.000000 -1.000000
v 1.000000 1.000000 1.000000
vn 0.0000 -1.0000 0.0000
vn 0.0000 1.0000 -0.0000
vn -1.0000 -0.0000 0.0000
vn 0.0000 -0.0000 -1.0000
vn 1.0000 -0.0000 0.0000
vn -0.0000 0.0000 1.0000
f 2//1 4//1 1//1
f 8//2 6//2 5//2
f 5//3 2//3 1//3
f 6//4 3//4 2//4
f 3//5 8//5 4//5
f 1//6 8//6 5//6
f 2//1 3//1 4//1
f 8//2 7//2 6//2
f 5//3 6//3 2//3
f 6//4 7//4 3//4
f 3//5 7//5 8//5
f 1//6 4//6 8//6
This is my code:
GLuint cubeVAO, cubeVerticesVBO, cubeColorsVBO, cubeNormalsVBO, cubeIndicesVBO;
glGenVertexArrays(1, &cubeVAO);
glBindVertexArray(cubeVAO);
glGenBuffers(1, &cubeVerticesVBO);
glBindBuffer(GL_ARRAY_BUFFER, cubeVerticesVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_vertices), cube_vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), BUFFER_OFFSET(0));
glEnableVertexAttribArray(0);
glGenBuffers(1, &cubeColorsVBO);
glBindBuffer(GL_ARRAY_BUFFER, cubeColorsVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_colors), cube_colors, GL_STATIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), BUFFER_OFFSET(0));
glEnableVertexAttribArray(1);
glGenBuffers(1, &cubeNormalsVBO);
glBindBuffer(GL_ARRAY_BUFFER, cubeNormalsVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(cube_normals), cube_normals, GL_STATIC_DRAW);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), BUFFER_OFFSET(0));
glEnableVertexAttribArray(2);
glGenBuffers(1, &cubeIndicesVBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, cubeIndicesVBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(cube_indices), cube_indices, GL_STATIC_DRAW);
glBindVertexArray(cubeVAO);
cubeShader.use();
glm::mat4 model;
cubeShader.setMat4("model", model);
cubeShader.setMat4("view", view);
cubeShader.setMat4("projection", projection);
cubeShader.setVec3("lampColor", lampColor);
cubeShader.setVec3("lampPos", lampPos);
cubeShader.setVec3("viewPos", viewPos);
glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, 0);
Vertex Shader
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec3 normal;
out vec3 Color;
out vec3 Normal;
out vec3 FragPos;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
vec4 posv4 = vec4(position, 1.0f);
Color = color;
FragPos = vec3(model * posv4);
Normal = normal;
gl_Position = projection*view*model*posv4;
}
Fragment Shader:
#version 330 core
in vec3 Color;
in vec3 Normal;
in vec3 FragPos;
out vec4 FragColor;
uniform vec3 lampColor;
uniform vec3 lampPos;
uniform vec3 viewPos;
void main()
{
// ambient
float ambientStrength = 0.1;
vec3 ambient = ambientStrength * lampColor;
// diffuse
vec3 norm = normalize(Normal);
vec3 lightDir = normalize(lampPos - FragPos);
float diff = max(dot(norm, lightDir), 0.0);
vec3 diffuse = diff * lampColor;
// specular
float specularStrength = 0.5;
vec3 viewDir = normalize(viewPos - FragPos);
vec3 reflectDir = reflect(-lightDir, norm);
float spec = pow(max(dot(viewDir, reflectDir), 0.0), 32);
vec3 specular = specularStrength * spec * lampColor;
vec3 result = (ambient + diffuse + specular) * Color;
FragColor = vec4(result, 1.0);
}
What's wrong? ,-,
Demonstration video:
https://vid.me/vy17h
You'll have to expand the data in the OBJ file. OpenGL expects a normal to be specified for each vertex, individually. In the OBJ file vertices and normals are treated separately. What you have to do is find the unique vertex+normal combinations, store that and index into that new vertex+normal array instead of using separate indices.
With modern OpenGL it is somewhat possible to use different indices for each vertex attribute by making use of vertex shader texture fetch, storing vertex data in textures. But this comes with a performance hit.
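A rough sketch of the first approach, building the unique vertex+normal combinations (the names objPositions, objNormals, Corner and ExpandObj are placeholders for illustration, not from the question):
#include <map>
#include <utility>
#include <vector>

struct Corner { unsigned v, n; };   // 1-based v//vn indices as read from one face corner

// Builds an interleaved position+normal array with one entry per unique v//vn
// pair, and fills outIndices with indices into that new array.
std::vector<float> ExpandObj(const std::vector<float>& objPositions,
                             const std::vector<float>& objNormals,
                             const std::vector<Corner>& corners,
                             std::vector<unsigned>& outIndices)
{
    std::vector<float> interleaved;                        // x y z nx ny nz per unique pair
    std::map<std::pair<unsigned, unsigned>, unsigned> seen;
    for (const Corner& c : corners)
    {
        auto key = std::make_pair(c.v, c.n);
        auto it = seen.find(key);
        if (it == seen.end())
        {
            unsigned newIndex = (unsigned)(interleaved.size() / 6);
            for (int k = 0; k < 3; ++k) interleaved.push_back(objPositions[(c.v - 1) * 3 + k]);
            for (int k = 0; k < 3; ++k) interleaved.push_back(objNormals[(c.n - 1) * 3 + k]);
            it = seen.insert({key, newIndex}).first;
        }
        outIndices.push_back(it->second);
    }
    return interleaved;
}
The resulting interleaved array can then go into one VBO (position and normal attributes with a stride of 6 * sizeof(float)), and outIndices into the element array buffer used by glDrawElements.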
The normals array only has 6 values because the cube has 6 faces and you have created a model where each face has 4 corner normals pointing in the same direction (i.e. they are identical), which explains the lighting not looking how you expect.
The OBJ file uses indexing so it can assign one of these 6 unique values to each of the 8 vertices.

Cube's edges in OpenGL

I have drawn a cube in OpenGL to use as a skybox.
I'm using a single texture image and I use the UV coordinates to locate the 6 faces in the right positions.
I send the texture with the following code:
//------------------------------------Luminance--------------------------------------------------------------
glPixelStorei(GL_UNPACK_ROW_LENGTH, m_width);
glBindTexture(GL_TEXTURE_2D, texturePointer[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_width, m_height, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_uYdata);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//--------------------------------------U component------------------------------------------------------------
glPixelStorei(GL_UNPACK_ROW_LENGTH, m_uvWidth);
glBindTexture(GL_TEXTURE_2D, texturePointer[1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_uvWidth, m_uvHeight, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_uUdata);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//------------------------------------- V component-------------------------------------------------------------
glBindTexture(GL_TEXTURE_2D, texturePointer[2]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, m_uvWidth, m_uvHeight, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_uVdata);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
I can't remove the white edges between the faces of the cube, see the picture below:
GL_NEAREST removes the problem but the image quality is lower.
How can I use GL_LINEAR and avoid this effect?
Edit:
The cube is created using the following Tutorial:
Model loading
Note that the texture is made of six faces stitched together in columns.
The actual Blender object is:
# Blender v2.77 (sub 0) OBJ File: 'demo_cube.blend'
# www.blender.org
o Cube
v 1.000000 -1.000000 -1.000000
v 1.0000000 -1.000000 1.000000
v -1.000000 -1.000000 1.000000
v -1.000000 -1.000000 -1.000000
v 1.000000 1.000000 -1.000000
v 1.0000000 1.000000 1.000000
v -1.000000 1.000000 1.000000
v -1.000000 1.000000 -1.000000
vt 0.833333333 0.0000
vt 0.666666666 1.0000
vt 0.833333333 1.0000
vt 0.833333333 0.0000
vt 1.000000000 1.0000
vt 1.000000000 0.0000
vt 0.333333333 1.0000
vt 0.500000000 0.0000
vt 0.333333333 0.0000
vt 0.500000000 1.0000
vt 0.666666666 0.0000
vt 0.500000000 0.0000
vt 0.000000000 0.0000
vt 0.166666666 1.0000
vt 0.166666666 0.0000
vt 0.333333333 0.0000
vt 0.166666666 1.0000
vt 0.333333333 1.0000
vt 0.666666666 0.0000
vt 0.833333333 1.0000
vt 0.500000000 1.0000
vt 0.666666666 1.0000
vt 0.000000000 1.0000
vt 0.166666666 0.0000
s off
f 2/1 4/2 1/3
f 8/4 6/5 5/6
f 5/7 2/8 1/9
f 6/10 3/11 2/12
f 3/13 8/14 4/15
f 1/16 8/17 5/18
f 2/1 3/19 4/2
f 8/4 7/20 6/5
f 5/7 6/21 2/8
f 6/10 7/22 3/11
f 3/13 7/23 8/14
f 1/16 4/24 8/17
//44 and 47
The fragment shader is:
#version 330 core
in vec2 oTexCoord;
uniform sampler2D yChannel;
uniform sampler2D uChannel;
uniform sampler2D vChannel;
out vec4 color;
const vec3 offset = vec3(0.0625, 0.5, 0.5);
const mat3 coef = mat3(
1.164, 0.0, 1.793,
1.164, -0.213, -0.533,
1.164, 2.112, 0.0
);
void main()
{
vec2 nTexCoord = vec2(oTexCoord.x, 1.0 - oTexCoord.y);
vec4 Tcolor = vec4(1.0);
if(oTexCoord.y <1 && oTexCoord.y > 0.0) {
if(oTexCoord.x < 1 && oTexCoord.x > 0.0) {
vec3 yuv = vec3(
texture(yChannel, nTexCoord).r,
texture(uChannel, nTexCoord).r,
texture(vChannel, nTexCoord).r
) - offset;
vec3 rgb = yuv * coef;
Tcolor = vec4(rgb, 1.0);
}
}
color = Tcolor;
}
I think you want to use cubemaps and GL_ARB_seamless_cube_map:
Although it is unlikely that the generated ( s t ) coordinate lies significantly outside the determined cube map face, it is often the case that the locations of the individual elements required during a linear sampling do not lie within the determined face, and their coordinates will therefore be modified by the selected clamping and wrapping rules. This often has the effect of producing seams or other discontinuities in the sampled texture.
This extension allows implementations to take samples from adjacent cube map faces, providing the ability to create seamless cube maps.
(emph. mine).
To use it, first check that you actually have that extension (this depends on your platform and on whichever toolkit you are using to access OpenGL), then just write
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);
to enable it (globally).
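A hedged sketch of what the cubemap upload could look like (faceSize and m_faceData[] are placeholder names for the six per-face buffers split out of the stitched image; GL_LUMINANCE is kept only to match the upload code above, core profiles would use GL_RED):
glEnable(GL_TEXTURE_CUBE_MAP_SEAMLESS);   // requires GL 3.2+ or ARB_seamless_cube_map

GLuint cubeTex;
glGenTextures(1, &cubeTex);
glBindTexture(GL_TEXTURE_CUBE_MAP, cubeTex);
for (int face = 0; face < 6; ++face)
{
    // One upload per face target: +X, -X, +Y, -Y, +Z, -Z in order.
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_LUMINANCE,
                 faceSize, faceSize, 0, GL_LUMINANCE, GL_UNSIGNED_BYTE, m_faceData[face]);
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
In the fragment shader the sampler would then be a samplerCube sampled with a direction vector, so the per-face UV clamping in the current shader is no longer needed.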

how to use glDrawElements with glBindVertexArray properly

What I am trying to do is use glDrawElements to draw without duplicating vertices, as follows:
Model ModelManager::CreateModel(std::vector<glm::vec3>&vertices, std::vector<uint16_t>&vertexIndeces)
{
//Vertecies
GLuint vertexArray;
glGenVertexArrays(1, &vertexArray);
glBindVertexArray(vertexArray);
GLuint vBufferId;
glGenBuffers(1, &vBufferId);
glBindBuffer(GL_ARRAY_BUFFER, vBufferId);
glBufferData(GL_ARRAY_BUFFER, vertices.size(), vertices.data(), GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
GLuint iBufferId;
glGenBuffers(1, &iBufferId);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, iBufferId);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, vertexIndeces.size(), vertexIndeces.data(), GL_STATIC_DRAW);
glBindVertexArray(0);
//
return Model(vertexArray, vBufferId, iBufferId, vertexIndeces.size());
}
and then when I draw :
void Model::Draw()
{
if (vertexArray)
{
isFinishedIniting = true;
glBindVertexArray(vertexArray);
glDrawElements(GL_TRIANGLES, elementCount, GL_UNSIGNED_SHORT, 0);
glBindVertexArray(0);
}
}
shaders:
Vertex shader:
#version 120
void main()
{
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
Fragment shader:
#version 120
void main()
{
    gl_FragColor = vec4(1.0, 0.0, 0.0, 0.0);
}
The OBJ file I am trying to load is simple; I made it by hand:
v 0.0 0.0 0.0
v 1.0 1.0 0.0
v -1.0 1.0 0.0
v -1.0 -1.0 0.0
v 1.0 -1.0 0.0
f 1/1/1 2/1/1 3/1/1
f 1/1/1 4/1/1 5/1/1
So it should show two red triangles, but it's not drawing anything on the screen!
There are a couple of problems in this code:
The sizes passed to glBufferData() look wrong:
glBufferData(GL_ARRAY_BUFFER, vertices.size(), vertices.data(), GL_STATIC_DRAW);
...
glBufferData(GL_ELEMENT_ARRAY_BUFFER, vertexIndeces.size(), vertexIndeces.data(), GL_STATIC_DRAW);
Both vertices and vertexIndeces are vectors. The .size() method on a vector gives the number of elements, while glBufferData() expects the size in bytes. To fix this, change the code to:
glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(vertices[0]),
vertices.data(), GL_STATIC_DRAW);
...
glBufferData(GL_ELEMENT_ARRAY_BUFFER, vertexIndeces.size() * sizeof(vertexIndeces[0]),
vertexIndeces.data(), GL_STATIC_DRAW);
The API calls use a mix of fixed function attributes and generic vertex attributes. Based on the version in the shader code, and the shader code itself (particularly the use of gl_Vertex), you're using OpenGL 2.1 level shaders with fixed function attributes. Therefore, you need to use glEnableClientState() and glVertexPointer() instead of glEnableVertexAttribArray() and glVertexAttribPointer():
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(3, GL_FLOAT, 0, 0);
You can use generic vertex attributes, but then you need to declare an attribute variable in the vertex shader instead of using gl_Vertex.
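A hedged sketch of that route (the attribute name inPosition and the program handle are illustrative, not from the question):
// GLSL 1.20 vertex shader using a generic attribute instead of gl_Vertex
const char* vs =
    "#version 120\n"
    "attribute vec3 inPosition;\n"
    "void main()\n"
    "{\n"
    "    gl_Position = gl_ModelViewProjectionMatrix * vec4(inPosition, 1.0);\n"
    "}\n";

// Bind the attribute to location 0 before glLinkProgram so it matches the
// glEnableVertexAttribArray(0) / glVertexAttribPointer(0, ...) calls in CreateModel():
glBindAttribLocation(program, 0, "inPosition");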

Using Eigen::Vector3f with Opengl ES 2.0 VBO

I am having trouble getting Eigen::Vector3f and OpenGL ES 2.0 VBOs to work together.
My initial attempt was glVertexAttribPointer(VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0).
While this draws nothing, if I start to play with the stride values I can see a broken mesh.
This is my current code, which leads to a crash. In my old code I was using a simple vector3 class made from 3 floats, which was working fine.
struct Vertex {
Eigen::Vector3f pos ;
Eigen::Vector3f normal;
};
std::vector<Vertex> Vertices;
std::vector<ushort16> Indices;
...
GLuint vao;
uint32 vboID, vboID2;
glGenVertexArraysOES(1, &vao);
glBindVertexArrayOES(vao);
glGenBuffers(1, &vboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glBufferData(GL_ARRAY_BUFFER, GL_STATIC_DRAW , sizeof(Vertex) * Vertices.size(), &Vertices[0]);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glGenBuffers(1, &vboID2);
glBindBuffer(GL_ARRAY_BUFFER, vboID2);
glBufferData(GL_ARRAY_BUFFER, GL_STATIC_DRAW , sizeof(ushort16) * m_vIndices.size(), &Indices[0]);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glEnableVertexAttribArray(VERTEX);
glVertexAttribPointer(VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), &Vertices[0].pos);
glEnableVertexAttribArray(NORMAL);
glVertexAttribPointer(NORMAL, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), &Vertices[0].normal);
glBindBuffer(GL_ARRAY_BUFFER, vboID2);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArrayOES(0);
...
That solved my problem:
I removed this line:
glBufferData(GL_ARRAY_BUFFER, GL_STATIC_DRAW , sizeof(Vertex) * Vertices.size(), &Vertices[0]);
And I changed these lines to
glVertexAttribPointer(VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), Vertices[0].pos.data());
....
glVertexAttribPointer(NORMAL, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), Vertices[0].normal.data());
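An alternative that keeps the vertex data in the VBO (a hedged sketch rather than the poster's final code; note that offsetof on a struct holding Eigen members is only conditionally supported by the standard, although it works on common compilers) would be to upload with the usual glBufferData argument order and pass byte offsets while the buffer is bound:
#include <cstddef>   // offsetof

glBindBuffer(GL_ARRAY_BUFFER, vboID);
// glBufferData takes (target, size in bytes, data, usage)
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex) * Vertices.size(), Vertices.data(), GL_STATIC_DRAW);

// With the buffer bound, the last argument is a byte offset into it, not a pointer.
glEnableVertexAttribArray(VERTEX);
glVertexAttribPointer(VERTEX, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                      (const void*)offsetof(Vertex, pos));
glEnableVertexAttribArray(NORMAL);
glVertexAttribPointer(NORMAL, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                      (const void*)offsetof(Vertex, normal));

glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID2);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(ushort16) * Indices.size(), Indices.data(), GL_STATIC_DRAW);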