I am getting frustrated with this. I am trying to render a cube with Vertex Buffer Objects, and since I was learning about projection I am trying to draw the frame of a cube. However, this code does not work: when I run the program from Code::Blocks, it stops working.
To narrow down the cause, I commented out the body of the render() method, and then the program no longer crashes. So this line of code
glDrawElements(GL_TRIANGLES, m_index->size(), GL_UNSIGNED_INT, 0); // render the cube
seems to be the source of the problem. But I don't know how to fix it, because this is just what I usually do (I have written similar programs before and they worked).
I would really appreciate your help!
ProjectionCube::ProjectionCube()
{
rotationAngle = 0.0;
}
bool ProjectionCube::initialize()
{
#ifdef _WIN32
glGenBuffers = (PFNGLGENBUFFERSARBPROC)wglGetProcAddress("glGenBuffers");
glBindBuffer = (PFNGLBINDBUFFERPROC)wglGetProcAddress("glBindBuffer");
glBufferData = (PFNGLBUFFERDATAPROC)wglGetProcAddress("glBufferData");
#else
glGenBuffers = (PFNGLGENBUFFERSARBPROC)glXGetProcAddress((const GLubyte*)"glGenBuffers");
glBindBuffer = (PFNGLBINDBUFFERPROC)glXGetProcAddress((const GLubyte*)"glBindBuffer");
glBufferData = (PFNGLBUFFERDATAPROC)glXGetProcAddress((const GLubyte*)"glBufferData");
#endif
if (!glGenBuffers || !glBindBuffer || !glBufferData)
{
std::cerr << "VBOs are not supported by your graphics card" << std::endl;
return false;
}
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
initializeVertexBuffers();
// bind vertex buffer and index buffer
// set vertex pointer to the buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_vbos[INDEX_BUFFER]);
glBindBuffer(GL_ARRAY_BUFFER, m_vbos[VERTEX_BUFFER]);
glVertexPointer(3, GL_FLOAT, 0, 0);
// no color buffer to bind
return true;
}
void ProjectionCube::initializeVertexBuffers()
{
const float size = 0.5f;
m_vertex = getCubeVertices(size);
for (int i = 0; i < m_vertex->size(); i++) {
std::cout << m_vertex->at(i) << ", ";
}
std::cout << std::endl;
static const unsigned int index[] = {//using triangles to render
0, 1, 2, 0, 2, 3, //bottom
0, 4, 5, 0, 1, 5, //back
0, 4, 7, 0, 3, 7, //left
1, 5, 6, 1, 2, 6, //right
4, 5, 6, 4, 7, 6, //top
2, 6, 7, 2, 3, 7}; // front
m_index = new vector<unsigned int>(index, index + sizeof(index) / sizeof(index[0]));
for (int i = 0; i < m_index->size(); i++) {
std::cout << m_index->at(i) << ", ";
}
std::cout << std::endl;
glGenBuffers(1, &m_vbos[VERTEX_BUFFER]);
glBindBuffer(GL_ARRAY_BUFFER, m_vbos[VERTEX_BUFFER]);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * m_vertex->size(),
&m_vertex->at(0), GL_STATIC_DRAW);
glGenBuffers(1, &m_vbos[INDEX_BUFFER]);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_vbos[INDEX_BUFFER]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * m_index->size(),
&m_index->at(0), GL_STATIC_DRAW);
}
void ProjectionCube::render(float m_x, float m_y, float m_z)
{
glTranslatef(m_x, m_y, m_z); // move to where the cube is located
//glRotatef(rotationAngle, 0.5f, 0.0f, 0.0f);
glDrawElements(GL_TRIANGLES, m_index->size(), GL_UNSIGNED_INT, 0); // render the cube
}
void ProjectionCube::animate(float dt)
{
const float SPEED = 15.0f;
rotationAngle += SPEED * dt;
if (rotationAngle >= 360 || rotationAngle <= 0) {
rotationAngle = -rotationAngle;
}
}
vector<GLfloat>* ProjectionCube::getCubeVertices(float r)
{
static const GLfloat vertices[] = {//bottom square
-r, -r, -r,
r, -r, -r,
r, -r, r,
-r, -r, r,
//top square
-r, r, -r,
r, r, -r,
r, r, r,
-r, r, r,};
vector<GLfloat>* result = new vector<GLfloat>(vertices, vertices + sizeof(vertices) / sizeof(vertices[0]));
return result;
}
Since you didn't post where render is called from, all I can do is advise you not to use new vector. There is no need for it as far as I can see.
Since your error occurs when you use m_index in the render function, and assuming m_index is a pointer to a vector, there is no need for it to be a pointer (assuming it's a member variable of ProjectionCube).
There are two uses of new vector, and neither is needed. Why does your program dynamically allocate a vector in the getCubeVertices function? The following removes the dynamic allocation:
vector<GLfloat> ProjectionCube::getCubeVertices(float r)
{
const GLfloat vertices[] = {//bottom square (not static, since the values depend on the parameter r)
-r, -r, -r,
r, -r, -r,
r, -r, r,
-r, -r, r,
//top square
-r, r, -r,
r, r, -r,
r, r, r,
-r, r, r,};
return vector<GLfloat>(vertices, vertices + sizeof(vertices) / sizeof(vertices[0]));
}
Then in initializeVertexBuffers(), the m_vertex member variable is no longer a pointer but an object:
std::vector<GLfloat> m_vertex; // assuming these are member variables in your class
std::vector<unsigned int> m_index;
//...
void ProjectionCube::initializeVertexBuffers()
{
const float size = 0.5f;
m_vertex = getCubeVertices(size);
//...
m_index = vector<unsigned int>(index, index + sizeof(index) / sizeof(index[0]));
Again, no need for new vector. You now access the vectors with m_index. and m_vertex. (member access) instead of m_index-> and m_vertex->.
Now what does this buy you, as opposed to what you were doing before? Well, now you're guaranteed that m_index is valid when render is called. There is no way for m_index to have been deallocated or for the pointer to have been corrupted, since m_index is no longer a pointer.
You also rid yourself of a potential memory leak using the above approach.
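With the vectors held by value, render() needs only the member-access change (a minimal sketch of the resulting method, assuming the same member names as above):
void ProjectionCube::render(float m_x, float m_y, float m_z)
{
    glTranslatef(m_x, m_y, m_z); // move to where the cube is located
    // m_index is now an object, so use . instead of ->
    glDrawElements(GL_TRIANGLES, m_index.size(), GL_UNSIGNED_INT, 0);
}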
Related
So today I wanted to learn how I can use Vector2 and Color classes to send data to OpenGL with fairly clean syntax.
The plan is this.
/*
Vector2 (8 bytes)
Color (16 bytes)
Vector2 + Color = 24 bytes
How can memory be laid out like this?
{Vector2, Color},
{Vector2, Color},
{Vector2, Color}
*/
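To make the layout concrete, this is the kind of struct I have in mind (assuming my Vector2 holds two floats and Color holds four, matching the sizes above):
struct InterleavedVertex {
    Vector2 position; // 8 bytes
    Color color;      // 16 bytes
};
static_assert(sizeof(InterleavedVertex) == 24, "expected tight packing");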
So I have my two arrays of data.
Vector2 vertices[] = {
Vector2(0.0f, 0.5f),
Vector2(0.5f, -0.5f),
Vector2(-0.5f, -0.5f)
};
// colors are mapped to their respective indices in the vertices array (i.e. colors[0] is mapped to vertices[0])
Color colors[] = {
Color(1.0f, 0.3f, 0.3f),
Color(0.3f, 1.0f, 0.3f),
Color(0.3f, 0.3f, 1.0f)
};
I was able to get a regular triangle rendering at the correct points, but sending over the color data has been pretty difficult for me to pull off.
The result I currently get is this.
Here's a snippet of the code.
// positions on screen
Vector2 vertices[] = {
Vector2(0.0f, 0.5f),
Vector2(0.5f, -0.5f),
Vector2(-0.5f, -0.5f)
};
// colors for each position
Color colors[] = {
Color(1.0f, 0.3f, 0.3f),
Color(0.3f, 1.0f, 0.3f),
Color(0.3f, 0.3f, 1.0f)
};
// create vertex and array buffers (each bound automatically)
unsigned int vertexArray = createVertexArray();
unsigned int vertexBuffer = createBuffer(GL_ARRAY_BUFFER);
// allocate the data
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices) + sizeof(colors), nullptr, GL_STATIC_DRAW);
// fill up allocated memory
for (int i = 0; i < 3; ++i) {
glBufferSubData(GL_ARRAY_BUFFER, sizeof(Vector2) * i, sizeof(Vector2), &vertices[i]);
glBufferSubData(GL_ARRAY_BUFFER, (sizeof(Vector2) + sizeof(Color)) * (i + 1), sizeof(Color), &colors[i]);
}
// set up vertex attributes
glVertexAttribPointer(0, 2, GL_FLOAT, false, sizeof(Vector2) + sizeof(Color), nullptr);
glVertexAttribPointer(1, 3, GL_FLOAT, false, sizeof(Vector2) + sizeof(Color), (const void*)( sizeof(Vector2) ));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
You need to add one vertex and one color alternately to the buffer. In your loop, the position offset sizeof(Vector2) * i ignores the interleaved colors, and the color offset (sizeof(Vector2) + sizeof(Color)) * (i + 1) starts one full stride too far. Derive both offsets from the same per-vertex stride:
for (int i = 0; i < 3; ++i)
{
GLintptr offsetV = i * (sizeof(Vector2) + sizeof(Color));
glBufferSubData(GL_ARRAY_BUFFER, offsetV, sizeof(Vector2), &vertices[i]);
GLintptr offsetC = offsetV + sizeof(Vector2);
glBufferSubData(GL_ARRAY_BUFFER, offsetC, sizeof(Color), &colors[i]);
}
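Alternatively, you can interleave on the CPU and upload everything with a single call, using a struct like the InterleavedVertex sketched in the question (a sketch; it assumes Vector2 and Color are plain, copyable float structs with no padding, and that <vector> is included):
std::vector<InterleavedVertex> interleaved;
for (int i = 0; i < 3; ++i)
    interleaved.push_back({ vertices[i], colors[i] });
glBufferData(GL_ARRAY_BUFFER, interleaved.size() * sizeof(InterleavedVertex),
             interleaved.data(), GL_STATIC_DRAW);
// the glVertexAttribPointer calls from the question already match this 24-byte stride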
In short, I created a function in C++ that builds a vertex buffer array and an index buffer array from a vector of vertices; e.g. if you enter 4 points, the function should in theory return two arrays to be used for the vertex buffer and the index buffer. However, this is where the problem arises: after the function returns the arrays, the buffers are initialised and glDrawElements is called, but only one of the two triangles that make up the square is drawn. I am very confused as to why.
Here is the code,
Struct defining the stuff the function returns:
struct ResultDataBuffer {
std::vector<float> positions;
std::vector<unsigned int> indices;
unsigned int psize;
unsigned int isize;
unsigned int bpsize;
unsigned int bisize;
};
//positions and indices are the vectors for the buffers (to be converted to arrays)
//psize and isize are the sizes of the vectors
//bpsize and bisize are the byte size of the vectors (i.e. sizeof())
The function itself:
static ResultDataBuffer CalculateBuffers(std::vector<float> vertixes) {
std::vector<float> positions = vertixes;
std::vector<unsigned int> indices;
int length = vertixes.size();
int l = length / 2;
int i = 0;
while (i < l - 2) { //The logic for the index buffer array. If the number of vertices is l, this vector should be 0,1,2, 0,2,3, ..., 0,l-2,l-1
i += 1;
indices.push_back(0);
indices.push_back(i + 1);
indices.push_back(i + 2);
}
return{ vertixes,indices, positions.size(), indices.size(), sizeof(float)*positions.size(), sizeof(unsigned int)*indices.size() };
}
The code in the main function (definitions of the buffers and related setup):
std::vector<float> vertixes = {
-0.5f, -0.5f,
0.5f, -0.5f,
0.5f, 0.5f,
-0.5f, 0.5f,
};
ResultDataBuffer rdb = CalculateBuffers(vertixes);
float* positions = rdb.positions.data(); //Convert vector into array
unsigned int* indices = rdb.indices.data(); //Ditto above
unsigned int buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, rdb.bpsize, positions, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, sizeof(float) * 2, 0);
unsigned int ibo;
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, rdb.bisize, indices, GL_STATIC_DRAW);
Stuff in the main function (in game loop):
glDrawElements(GL_TRIANGLES, rdb.isize, GL_UNSIGNED_INT, nullptr);
Apologies for the length of this post; I tried to cut it down.
C++ full code: https://pastebin.com/ZGLSQm3b
Shader code (located in /res/shaders/Basic.shader): https://pastebin.com/C1ahVUD9
So to summarize: instead of drawing a square (two triangles), this code draws only one triangle.
The issue is caused by the loop which generates the array of indices:
while (i < l - 2) {
i += 1;
indices.push_back(0);
indices.push_back(i + 1);
indices.push_back(i + 2);
}
This loop generates the indices
0, 2, 3, 0, 3, 4
But you have to generate the indices
0, 1, 2, 0, 2, 3
This happens because the control variable is incremented before the indices are appended to the vector.
Increment the control variable at the end of the loop to solve the issue:
while (i < l - 2) {
indices.push_back(0);
indices.push_back(i + 1);
indices.push_back(i + 2);
i += 1;
}
Or use a for loop:
for (unsigned int i = 0; i < l - 2; ++i)
{
unsigned int t[]{ 0, i+1, i+2 };
indices.insert(indices.end(), t, t+3);
}
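As a quick sanity check, with the question's four-vertex square either version now produces both triangles (a sketch, assuming the corrected loop has been applied inside CalculateBuffers):
std::vector<float> quad = { -0.5f, -0.5f, 0.5f, -0.5f, 0.5f, 0.5f, -0.5f, 0.5f };
ResultDataBuffer result = CalculateBuffers(quad);
// result.indices now holds 0, 1, 2, 0, 2, 3: the two triangles of the square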
I'm brand new to OpenGL and am having some difficulty rendering multiple objects.
I have a vector of surfaces, each of which has its own VertexBuffer. Then, in the while loop, I draw each shape on its own.
It's all well and good when I have many of the same object (multiple cubes, etc.); however, when I add a triangle mesh, everything gets out of whack.
I can have many cubes
I can have a single triangle mesh:
But, when I try to have a cube and then a triangle mesh I get:
I'm totally at a loss for what's going on. The code for my loop is provided below.
while (!glfwWindowShouldClose(window))
{
// Get the size of the window
int width, height;
glfwGetWindowSize(window, &width, &height);
float aspect_ratio = 1 * float(height)/float(width); // corresponds to the necessary width scaling
double xpos, ypos;
glfwGetCursorPos(window, &xpos, &ypos);
// Clear the framebuffer
glClearColor(0.5f, 0.5f, 0.5f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Enable depth test
glEnable(GL_DEPTH_TEST);
glUniform3f(program.uniform("triangleColor"), 1.0f, 1.0f, 1.0f);
glUniformMatrix4fv(program.uniform("proj"), 1, GL_FALSE, projection.data());
glUniformMatrix4fv(program.uniform("view"), 1, GL_FALSE, view.data());
int tally = 0;
for (int i = 0; i < surfaces.size(); i++) {
Surface *s = surfaces[i];
Vector3f color = s->getColor();
int tempIndex = triangleIndex;
Matrix4f model = s->getModel();
// Convert screen position to world coordinates
double xworld = ((xpos/double(width))*2)-1;
double yworld = (((height-1-ypos)/double(height))*2)-1; // NOTE: y axis is flipped in glfw
if (isPressed && mode == "translate") {
if(tempIndex == i) {
Vector4f center = s->getCenter() + model.col(3);
Vector4f displacement = Vector4f(xworld, yworld, 0, 1) - center;
Matrix4f translation = translateMatrix(displacement(0), displacement(1), displacement(2));
model = translation * s->getModel();
s->setModel(model);
}
}
glUniform3f(program.uniform("triangleColor"), color(0), color(1), color(2));
glUniformMatrix4fv(program.uniform("model"), 1, GL_FALSE, model.data());
glDrawArrays(GL_TRIANGLES, 0, s->getVertices().size());
}
And I initialize each VBO when making the object as
VertexBufferObject VBO;
VBO.init();
VBO.update(Vertices);
program.bindVertexAttribArray("position", VBO);
Surface* s = new Surface(VBO, Vertices, percentScale, 0, transformedCenter, SmoothNormals, FlatNormals, color);
s->setModel(model);
surfaces.push_back(s);
And where Program::bindVertexAttribArray is defined as
GLint Program::bindVertexAttribArray(
const std::string &name, VertexBufferObject& VBO) const
{
GLint id = attrib(name);
if (id < 0)
return id;
if (VBO.id == 0)
{
glDisableVertexAttribArray(id);
return id;
}
VBO.bind();
glEnableVertexAttribArray(id);
glVertexAttribPointer(id, VBO.rows, GL_FLOAT, GL_FALSE, 0, 0);
check_gl_error();
return id;
}
You're not binding any buffers before the draw call, so you're probably just drawing whatever buffer you last bound when you initialised them. You'll need something like this at the end of your loop, before glDrawArrays:
...
program.bindVertexAttribArray("position", VBO); // where VBO is the buffer of surface s
glUniform3f(program.uniform("triangleColor"), color(0), color(1), color(2));
glUniformMatrix4fv(program.uniform("model"), 1, GL_FALSE, model.data());
glDrawArrays(GL_TRIANGLES, 0, s->getVertices().size());
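Note that this assumes each Surface can hand back the VertexBufferObject it was created with; the question doesn't show such an accessor, so the getVBO() used below is a hypothetical name:
for (int i = 0; i < surfaces.size(); i++) {
    Surface *s = surfaces[i];
    program.bindVertexAttribArray("position", s->getVBO()); // getVBO() is hypothetical
    // ... set the color/model uniforms as before, then draw
    glDrawArrays(GL_TRIANGLES, 0, s->getVertices().size());
}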
I am trying to render the Utah teapot using Bézier surfaces and have more or less succeeded. Yet, there are some defects that are totally undesirable. As you can see here, the teapot looks OK, but there is some unwanted 'interpolation'. What do you think may have caused this? By the way, when I write the object into a .ply file, I can open it using MeshLab with no problem and no defects. Lastly, I'm using Xcode as my development environment.
I really appreciate any help you can provide.
EDIT: Some parts of the code are added. First of all, the control points and the patches used are retrieved from this old paper.
struct position3D { GLfloat x, y, z; };
struct position3D teapot_cp_vertices[] = {
// 1
{ 1.4 , 0.0 , 2.4 },
{ 1.4 , -0.784 , 2.4 },
{ 0.784 , -1.4 , 2.4 },
{ 0.0 , -1.4 , 2.4 },
.
.
.
};
#define TEAPOT_NB_PATCHES 32
#define ORDER 3
unsigned short teapot_patches[][ORDER + 1][ORDER + 1] =
{
// rim
{ { 1, 2, 3, 4 },{ 5, 6, 7, 8 },{ 9, 10, 11, 12 },{ 13, 14, 15, 16, } },
{ { 4, 17, 18, 19 },{ 8, 20, 21, 22 },{ 12, 23, 24, 25 },{ 16, 26, 27, 28, } },
{ { 19, 29, 30, 31 },{ 22, 32, 33, 34 },{ 25, 35, 36, 37 },{ 28, 38, 39, 40, } },
{ { 31, 41, 42, 1 },{ 34, 43, 44, 5 },{ 37, 45, 46, 9 },{ 40, 47, 48, 13, } },
...
...
...
};
All the points and patches can be found in the mentioned paper.
The vertices and the triangles used to render the teapot are calculated using:
int factorial(int n)
{
assert(n >= 0);
return (n == 1 || n == 0) ? 1 : factorial(n - 1) * n;
}
float binomial_coefficient(int i, int n) {
assert(i >= 0); assert(n >= 0);
return 1.0f * factorial(n) / (factorial(i) * factorial(n-i));
}
float bernstein_polynomial(int i, int n, float u) {
return binomial_coefficient(i, n) * powf(u, i) * powf(1-u, n-i);
}
void build_control_points_k(int p, struct position3D control_points_k[][ORDER+1]) {
for (int i = 0; i <= ORDER; i++) {
for (int j = 0; j <= ORDER; j++) {
control_points_k[i][j] = teapot_cp_vertices[teapot_patches[p][i][j] - 1];
}
}
}
Vertex compute_position(struct position3D control_points_k[][ORDER+1], float u, float v) {
Vertex result = *new Vertex();
for (int i = 0; i <= ORDER; i++) {
float poly_i = bernstein_polynomial(i, ORDER, u);
for (int j = 0; j <= ORDER; j++) {
float poly_j = bernstein_polynomial(j, ORDER, v);
result.x += poly_i * poly_j * control_points_k[i][j].x;
result.y += poly_i * poly_j * control_points_k[i][j].y;
result.z += poly_i * poly_j * control_points_k[i][j].z;
result.r = 0; //default colour
result.g = 0;
result.b = 0;
}
}
return result;
}
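For reference, compute_position evaluates the standard bicubic Bézier patch, where P_{ij} are the 4x4 control points of the patch and B_i^n are the Bernstein polynomials implemented above:

p(u, v) = \sum_{i=0}^{3} \sum_{j=0}^{3} B_i^3(u) \, B_j^3(v) \, P_{ij}, \qquad B_i^n(t) = \binom{n}{i} t^i (1 - t)^{n - i}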
#define RESU 10 //resolution in u axis
#define RESV 10 //resolution in v axis
void Object3D::build_teapot() {
vlist = new Vertex[TEAPOT_NB_PATCHES * RESU*RESV]; //vertex list
tlist = new Triangle[TEAPOT_NB_PATCHES * (RESU-1)*(RESV-1) * 2]; //triangle list
//calculate vertices
for (int p = 0; p < TEAPOT_NB_PATCHES; p++) {
struct position3D control_points_k[ORDER+1][ORDER+1];
build_control_points_k(p, control_points_k);
for (int ru = 0; ru <= RESU-1; ru++) {
float u = 1.0 * ru / (RESU-1);
for (int rv = 0; rv <= RESV-1; rv++) {
float v = 1.0 * rv / (RESV-1);
vlist[p*RESU*RESV + ru*RESV + rv] = compute_position(control_points_k, u, v);
vlist[p*RESU*RESV + ru*RESV + rv].r = 1.0 * p / TEAPOT_NB_PATCHES;
vlist[p*RESU*RESV + ru*RESV + rv].g = 1.0 * p / TEAPOT_NB_PATCHES;
vlist[p*RESU*RESV + ru*RESV + rv].b = 0.7;
}
}
}
//calculate triangle vertex orders or namely triangles
int n = 0;
Triangle tmpTrg = *new Triangle();
tmpTrg.nverts = 3;
for (int p = 0; p < TEAPOT_NB_PATCHES; p++) {
for (int ru = 0; ru < RESU-1; ru++)
for (int rv = 0; rv < RESV-1; rv++) {
// ABCD is a square
// triangle in the order ABC is the first one
tmpTrg.verts = new int[tmpTrg.nverts];
tmpTrg.verts[0] = p*RESU*RESV + ru *RESV + rv ;
tmpTrg.verts[1] = p*RESU*RESV + ru *RESV + (rv+1);
tmpTrg.verts[2] = p*RESU*RESV + (ru+1)*RESV + (rv+1);
tlist[n] = tmpTrg;
n++;
// triangle in the order CDA is the second one
tmpTrg.verts = new int[tmpTrg.nverts];
tmpTrg.verts[0] = p*RESU*RESV + (ru+1)*RESV + (rv+1);
tmpTrg.verts[1] = p*RESU*RESV + (ru+1)*RESV + rv ;
tmpTrg.verts[2] = p*RESU*RESV + ru *RESV + rv ;
tlist[n] = tmpTrg;
n++;
}
}
}
Here is the GL initialisation function:
void init(int w, int h)
{
// Init GLFW
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// macOSX requirement :
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_SAMPLES, 4);
window = glfwCreateWindow(w, h, "OpenGLTeapot", nullptr, nullptr); // Windowed
glfwMakeContextCurrent(window);
glfwSetKeyCallback(window, key_callback);
// Initialize GLEW to setup the OpenGL Function pointers
glewExperimental = GL_TRUE;
glewInit();
// Define the viewport dimensions
glViewport(0, 0, w, h);
// Enable depth test
glEnable(GL_DEPTH_TEST);
}
EDIT 2: I tried to run the code on a Windows computer using Visual Studio 2015, and there is no defect like this. Does anybody have any idea about what may cause such a stupid problem?
EDIT 3: Here is the object creation code:
void Object3D::CreateObject()
{
int attributeCount = 6;
vertexCount = TEAPOT_NB_PATCHES * RESU*RESV;
triangleCount = TEAPOT_NB_PATCHES * (RESU-1)*(RESV-1) * 2;
build_teapot();
//Bind the vertex and index buffers
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
// Bind our Vertex Array Object first, then bind and set our buffers and pointers.
glBindVertexArray(VAO);
//Convert our vertex list into a continuous array, copy the vertices into the vertex buffer.
float* vertexData = new float[vertexCount * attributeCount];
for (int i = 0; i < vertexCount; i++)
memcpy(&vertexData[i*attributeCount],
vlist[i].getAsArray(), sizeof(float)*attributeCount);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*attributeCount*vertexCount, vertexData, GL_STATIC_DRAW);
//Copy the index data found in the list of triangles into the element array buffer (index array)
//We are using a triangles, so we need triangleCount * 3 indices.
int* indexData = new int[triangleCount * 3];
for (int i = 0; i < triangleCount; i++)
memcpy(&indexData[i * 3], tlist[i].verts, sizeof(int) * 3);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int)*3*triangleCount, indexData, GL_STATIC_DRAW);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, attributeCount * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, attributeCount * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
// Unbind VAO
glBindVertexArray(0);
// Delete temporary buffers
delete[] vertexData;
delete[] indexData;
}
And the main function:
int main()
{
int screenWidth = 800;
int screenHeight = 600;
init(screenWidth, screenHeight);
std::vector<Object3D*> listOfObjects;
Object3D* pObj = new Object3D();
pObj->CreateObject();
listOfObjects.push_back(pObj);
//Create the shaders.
Shader shader(VertexShaderPath, FragmentShaderPath);
while (!glfwWindowShouldClose(window))
{
glfwPollEvents();
// Clear the colorbuffer
glClearColor(0.0f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use the shader
shader.Use();
// Transformations
// Create camera transformation
glm::mat4 view;
glm::vec3 cameraPos = glm::vec3(0.0f, 150.0f, 100.0f);
glm::vec3 cameraTarget = glm::vec3(0.0, 80.0f, 20.0f);
glm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
view = glm::lookAt(cameraPos, cameraTarget, cameraUp);
// Create projection transformation
glm::mat4 projection;
projection = glm::perspective<float>(90.0, (float)screenWidth / (float)screenHeight, 0.1f, 1000.0f);
// Get the uniform locations
GLint modelLoc = glGetUniformLocation(shader.Program, "model");
GLint viewLoc = glGetUniformLocation(shader.Program, "view");
GLint projLoc = glGetUniformLocation(shader.Program, "projection");
// Pass the view and projection matrices to the shaders
glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view));
glUniformMatrix4fv(projLoc, 1, GL_FALSE, glm::value_ptr(projection));
// Put the bottom of the object on XZ plane and scale it up
pObj->modelMatrix = glm::scale(glm::mat4(1.0f), glm::vec3(30.0f));
pObj->modelMatrix = glm::rotate(pObj->modelMatrix, -90.0f, glm::vec3(1.0f, 0.0f,0.0f));
for (auto pObj : listOfObjects)
{
glBindVertexArray(pObj->VAO);
glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(pObj->modelMatrix));
glDrawElements(GL_TRIANGLES, (pObj->vertexCount) * 6, GL_UNSIGNED_INT, 0);
//glDrawElements(GL_TRIANGLES, 50, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
// Swap the buffers
glfwSwapBuffers(window);
}
for (auto pObj : listOfObjects)
{
glDeleteVertexArrays(1, &pObj->VAO);
glDeleteBuffers(1, &pObj->VBO);
glDeleteBuffers(1, &pObj->EBO);
delete pObj;
}
glfwTerminate();
return 0;
}
The problem seems to be the offset argument of the glDrawElements call. When I changed that line to:
glDrawElements(GL_TRIANGLES, (listOfObjects[i]->vertexCount) * 6, GL_UNSIGNED_INT, (void*)(sizeof(Vertex)));
it renders properly.
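One more thing worth checking in the code above (an observation, not part of the original fix, and assuming triangleCount is a member of Object3D like vertexCount): the element buffer is filled with triangleCount * 3 indices, but glDrawElements is asked for vertexCount * 6 of them, so it reads past the end of the index data. Sizing the count from the index data avoids the over-read:
glDrawElements(GL_TRIANGLES, pObj->triangleCount * 3, GL_UNSIGNED_INT, 0);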
I've defined a block of vertex data like so:
struct vertex {
float x, y, u, v;
};
struct vertex_group {
vertex tl, bl, br, tr;
glm::vec4 color;
vertex_group(float x, float y, float width, float height, glm::vec4 c) {
tl.x = x; tl.y = y + height; tl.u = 0; tl.v = 0;
bl.x = x; bl.y = y; bl.u = 0; bl.v = 1;
br.x = x + width; br.y = y; br.u = 1; br.v = 1;
tr.x = x + width; tr.y = y + height; tr.u = 1; tr.v = 0;
color = c;
}
vertex_group(positioned_letter const& l) :
vertex_group(l.x, l.y, l.width, l.height, l.l.color) {
}
const float * data() const {
return &tl.x;
}
};
The attribute pointers are set like this:
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)(4 * 4 * sizeof(GLfloat)));
And the draw code is invoked like so:
vertex_group vertices(l);
glBindTexture(GL_TEXTURE_2D, g.texture);
glBindBuffer(GL_ARRAY_BUFFER, objects.rect_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices.data(), GL_STREAM_DRAW);
glDrawArrays(GL_QUADS, 0, 4);
The basic idea is that all four vertices of the quad should use the same data for color, even as they require different values for position and texture data. However, when I set the color to red (1, 0, 0, 1), the results on screen are... not quite right.
Just for reference's sake, if the only changes I make are to the first two sections of code, as follows:
struct vertex {
float x, y, u, v;
};
struct vertex_group {
vertex tl;
glm::vec4 color1;
vertex bl;
glm::vec4 color2;
vertex br;
glm::vec4 color3;
vertex tr;
glm::vec4 color4;
vertex_group(float x, float y, float width, float height, glm::vec4 c) {
tl.x = x; tl.y = y + height; tl.u = 0; tl.v = 0;
bl.x = x; bl.y = y; bl.u = 0; bl.v = 1;
br.x = x + width; br.y = y; br.u = 1; br.v = 1;
tr.x = x + width; tr.y = y + height; tr.u = 1; tr.v = 0;
color1 = color2 = color3 = color4 = c;
}
vertex_group(positioned_letter const& l) :
vertex_group(l.x, l.y, l.width, l.height, l.l.color) {
}
const float * data() const {
return &tl.x;
}
};
(other part)
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (void*)(4 * sizeof(GLfloat)));
It renders correctly:
So in a nutshell, my question is: I'd like to structure my data (and render with it) like xyuvxyuvxyuvxyuvrgba but the only way I can get it to work is by doing xyuvrgbaxyuvrgbaxyuvrgbaxyuvrgba. How do I set my pointers/call the draw function so that I can use the first method?
You can't do that. But you can achieve this layout using instanced rendering:
xyuvxyuvxyuvxyuv // <- only once
whrgbawhrgbawhrgbawhrgba... // <- repeated per glyph
where w and h are the sizes of each quad, to be applied to the unit quad in the vertex shader.
Here I split the data into two buffers, but you can technically load it all into one buffer. Also, I use the OpenGL 4.5 direct state access (DSA) API here because I think it is easier to use. If you don't have it available yet, you can change it to use the older calls accordingly.
float quad[] = {
0, 1, 0, 0,
0, 0, 0, 1,
1, 1, 1, 0,
1, 0, 1, 1,
};
struct Instance {
vec2 size;
// TODO: add index of the glyph you want to render
vec4 color;
};
Instance inst[] = { ... };
int ninst = sizeof(inst)/sizeof(inst[0]);
GLuint quad_buf = ... create buffer from quad[] ...;
GLuint inst_buf = ... create buffer from inst[] ...;
GLuint vao;
glCreateVertexArrays(1, &vao);
glEnableVertexArrayAttrib(vao, 0);
glVertexArrayAttribFormat(vao, 0, 4, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(vao, 0, 0); // from 0th buffer
glEnableVertexArrayAttrib(vao, 1);
glVertexArrayAttribFormat(vao, 1, 2, GL_FLOAT, GL_FALSE, offsetof(Instance, size));
glVertexArrayAttribBinding(vao, 1, 1); // from 1st buffer
glEnableVertexArrayAttrib(vao, 2);
glVertexArrayAttribFormat(vao, 2, 4, GL_FLOAT, GL_FALSE, offsetof(Instance, color));
glVertexArrayAttribBinding(vao, 2, 1); // from 1st buffer
glVertexArrayVertexBuffer(vao, 0, quad_buf, 0, sizeof(float)*4); // 0th buffer is the quad
glVertexArrayVertexBuffer(vao, 1, inst_buf, 0, sizeof(Instance)); // 1th buffer for instances
glVertexArrayBindingDivisor(vao, 1, 1); // 1st buffer advances once per instance
// to draw:
glBindTexture(...);
glBindVertexArray(vao);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ninst);
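If OpenGL 4.5 isn't available, here is a sketch of the equivalent setup with the classic bind-to-edit calls (it needs OpenGL 3.3+ for glVertexAttribDivisor and assumes the same buffers and Instance struct as above):
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, quad_buf);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(float) * 4, (void*)0);
glBindBuffer(GL_ARRAY_BUFFER, inst_buf);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Instance), (void*)offsetof(Instance, size));
glVertexAttribDivisor(1, 1); // advance once per instance
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Instance), (void*)offsetof(Instance, color));
glVertexAttribDivisor(2, 1);
// then draw exactly as before:
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ninst);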