OpenGL defect - undesirable interpolation - C++

I am trying to render the Utah teapot using Bézier surfaces and have more or less succeeded. Yet there are some defects that are totally undesirable. As you can see here, the teapot looks OK, but there is some unnecessary 'interpolation'. What do you think may have caused this? By the way, when I write the object into a .ply file, I can open it in MeshLab with no problem and no defects. Lastly, I'm using Xcode as my development environment.
I really appreciate any help you can provide.
EDIT: I have added some parts of the code. First of all, the control points and the patches used are taken from this old paper.
struct position3D { GLfloat x, y, z; };
struct position3D teapot_cp_vertices[] = {
// 1
{ 1.4 , 0.0 , 2.4 },
{ 1.4 , -0.784 , 2.4 },
{ 0.784 , -1.4 , 2.4 },
{ 0.0 , -1.4 , 2.4 },
...
};
#define TEAPOT_NB_PATCHES 32
#define ORDER 3
unsigned short teapot_patches[][ORDER + 1][ORDER + 1] =
{
// rim
{ { 1, 2, 3, 4 },{ 5, 6, 7, 8 },{ 9, 10, 11, 12 },{ 13, 14, 15, 16, } },
{ { 4, 17, 18, 19 },{ 8, 20, 21, 22 },{ 12, 23, 24, 25 },{ 16, 26, 27, 28, } },
{ { 19, 29, 30, 31 },{ 22, 32, 33, 34 },{ 25, 35, 36, 37 },{ 28, 38, 39, 40, } },
{ { 31, 41, 42, 1 },{ 34, 43, 44, 5 },{ 37, 45, 46, 9 },{ 40, 47, 48, 13, } },
...
...
...
};
All the points and patches can be found in the mentioned paper.
The vertices and the triangles used to render the teapot are calculated using:
int factorial(int n)
{
assert(n >= 0);
return (n == 1 || n == 0) ? 1 : factorial(n - 1) * n;
}
float binomial_coefficient(int i, int n) {
assert(i >= 0); assert(n >= 0);
return 1.0f * factorial(n) / (factorial(i) * factorial(n-i));
}
float bernstein_polynomial(int i, int n, float u) {
return binomial_coefficient(i, n) * powf(u, i) * powf(1-u, n-i);
}
void build_control_points_k(int p, struct position3D control_points_k[][ORDER+1]) {
for (int i = 0; i <= ORDER; i++) {
for (int j = 0; j <= ORDER; j++) {
control_points_k[i][j] = teapot_cp_vertices[teapot_patches[p][i][j] - 1];
}
}
}
Vertex compute_position(struct position3D control_points_k[][ORDER+1], float u, float v) {
Vertex result = Vertex(); // value-initialise so the += accumulation below starts from zero
for (int i = 0; i <= ORDER; i++) {
float poly_i = bernstein_polynomial(i, ORDER, u);
for (int j = 0; j <= ORDER; j++) {
float poly_j = bernstein_polynomial(j, ORDER, v);
result.x += poly_i * poly_j * control_points_k[i][j].x;
result.y += poly_i * poly_j * control_points_k[i][j].y;
result.z += poly_i * poly_j * control_points_k[i][j].z;
result.r = 0; //default colour
result.g = 0;
result.b = 0;
}
}
return result;
}
#define RESU 10 //resolution in u axis
#define RESV 10 //resolution in v axis
void Object3D::build_teapot() {
vlist = new Vertex[TEAPOT_NB_PATCHES * RESU*RESV]; //vertex list
tlist = new Triangle[TEAPOT_NB_PATCHES * (RESU-1)*(RESV-1) * 2]; //triangle list
//calculate vertices
for (int p = 0; p < TEAPOT_NB_PATCHES; p++) {
struct position3D control_points_k[ORDER+1][ORDER+1];
build_control_points_k(p, control_points_k);
for (int ru = 0; ru <= RESU-1; ru++) {
float u = 1.0 * ru / (RESU-1);
for (int rv = 0; rv <= RESV-1; rv++) {
float v = 1.0 * rv / (RESV-1);
vlist[p*RESU*RESV + ru*RESV + rv] = compute_position(control_points_k, u, v);
vlist[p*RESU*RESV + ru*RESV + rv].r = 1.0 * p / TEAPOT_NB_PATCHES;
vlist[p*RESU*RESV + ru*RESV + rv].g = 1.0 * p / TEAPOT_NB_PATCHES;
vlist[p*RESU*RESV + ru*RESV + rv].b = 0.7;
}
}
}
//calculate the triangle vertex orders, i.e. the triangles themselves
int n = 0;
Triangle tmpTrg = Triangle(); // reusable triangle record; verts is allocated per triangle below
tmpTrg.nverts = 3;
for (int p = 0; p < TEAPOT_NB_PATCHES; p++) {
for (int ru = 0; ru < RESU-1; ru++)
for (int rv = 0; rv < RESV-1; rv++) {
// ABCD is a square
// triangle in the order ABC is the first one
tmpTrg.verts = new int[tmpTrg.nverts];
tmpTrg.verts[0] = p*RESU*RESV + ru *RESV + rv ;
tmpTrg.verts[1] = p*RESU*RESV + ru *RESV + (rv+1);
tmpTrg.verts[2] = p*RESU*RESV + (ru+1)*RESV + (rv+1);
tlist[n] = tmpTrg;
n++;
// triangle in the order CDA is the second one
tmpTrg.verts = new int[tmpTrg.nverts];
tmpTrg.verts[0] = p*RESU*RESV + (ru+1)*RESV + (rv+1);
tmpTrg.verts[1] = p*RESU*RESV + (ru+1)*RESV + rv ;
tmpTrg.verts[2] = p*RESU*RESV + ru *RESV + rv ;
tlist[n] = tmpTrg;
n++;
}
}
}
Here is the GL initialisation function:
void init(int w, int h)
{
// Init GLFW
glfwInit();
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// macOSX requirement :
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_SAMPLES, 4);
window = glfwCreateWindow(w, h, "OpenGLTeapot", nullptr, nullptr); // Windowed
glfwMakeContextCurrent(window);
glfwSetKeyCallback(window, key_callback);
// Initialize GLEW to setup the OpenGL Function pointers
glewExperimental = GL_TRUE;
glewInit();
// Define the viewport dimensions
glViewport(0, 0, w, h);
// Enable depth test
glEnable(GL_DEPTH_TEST);
}
EDIT 2: I tried running the code on a Windows computer using Visual Studio 2015, and there is no such defect there. Does anybody have any idea what might cause such a strange problem?
EDIT 3: Here is the object creation code:
void Object3D::CreateObject()
{
int attributeCount = 6;
vertexCount = TEAPOT_NB_PATCHES * RESU*RESV;
triangleCount = TEAPOT_NB_PATCHES * (RESU-1)*(RESV-1) * 2;
build_teapot();
//Bind the vertex and index buffers
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
// Bind our Vertex Array Object first, then bind and set our buffers and pointers.
glBindVertexArray(VAO);
//Convert our vertex list into a continuous array, copy the vertices into the vertex buffer.
float* vertexData = new float[vertexCount * attributeCount];
for (int i = 0; i < vertexCount; i++)
memcpy(&vertexData[i*attributeCount],
vlist[i].getAsArray(), sizeof(float)*attributeCount);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*attributeCount*vertexCount, vertexData, GL_STATIC_DRAW);
//Copy the index data found in the list of triangles into the element array buffer (index array)
//We are using triangles, so we need triangleCount * 3 indices.
int* indexData = new int[triangleCount * 3];
for (int i = 0; i < triangleCount; i++)
memcpy(&indexData[i * 3], tlist[i].verts, sizeof(int) * 3);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int)*3*triangleCount, indexData, GL_STATIC_DRAW);
// Position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, attributeCount * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Color attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, attributeCount * sizeof(GLfloat), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
// Unbind VAO
glBindVertexArray(0);
// Delete temporary buffers
delete[] vertexData;
delete[] indexData;
}
And the main function:
int main()
{
int screenWidth = 800;
int screenHeight = 600;
init(screenWidth, screenHeight);
std::vector<Object3D*> listOfObjects;
Object3D* pObj = new Object3D();
pObj->CreateObject();
listOfObjects.push_back(pObj);
//Create the shaders.
Shader shader(VertexShaderPath, FragmentShaderPath);
while (!glfwWindowShouldClose(window))
{
glfwPollEvents();
// Clear the colorbuffer
glClearColor(0.0f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Use the shader
shader.Use();
// Transformations
// Create camera transformation
glm::mat4 view;
glm::vec3 cameraPos = glm::vec3(0.0f, 150.0f, 100.0f);
glm::vec3 cameraTarget = glm::vec3(0.0, 80.0f, 20.0f);
glm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
view = glm::lookAt(cameraPos, cameraTarget, cameraUp);
// Create projection transformation
glm::mat4 projection;
projection = glm::perspective<float>(90.0, (float)screenWidth / (float)screenHeight, 0.1f, 1000.0f);
// Get the uniform locations
GLint modelLoc = glGetUniformLocation(shader.Program, "model");
GLint viewLoc = glGetUniformLocation(shader.Program, "view");
GLint projLoc = glGetUniformLocation(shader.Program, "projection");
// Pass the view and projection matrices to the shaders
glUniformMatrix4fv(viewLoc, 1, GL_FALSE, glm::value_ptr(view));
glUniformMatrix4fv(projLoc, 1, GL_FALSE, glm::value_ptr(projection));
// Put the bottom of the object on XZ plane and scale it up
pObj->modelMatrix = glm::scale(glm::mat4(1.0f), glm::vec3(30.0f));
pObj->modelMatrix = glm::rotate(pObj->modelMatrix, -90.0f, glm::vec3(1.0f, 0.0f,0.0f));
for (auto pObj : listOfObjects)
{
glBindVertexArray(pObj->VAO);
glUniformMatrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(pObj->modelMatrix));
glDrawElements(GL_TRIANGLES, (pObj->vertexCount) * 6, GL_UNSIGNED_INT, 0);
//glDrawElements(GL_TRIANGLES, 50, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
// Swap the buffers
glfwSwapBuffers(window);
}
for (auto pObj : listOfObjects)
{
glDeleteVertexArrays(1, &pObj->VAO);
glDeleteBuffers(1, &pObj->VBO);
glDeleteBuffers(1, &pObj->EBO);
delete pObj;
}
glfwTerminate();
return 0;
}

The problem seems to be related to the offset parameter of the glDrawElements call. When I changed that line to:
glDrawElements(GL_TRIANGLES, (listOfObjects[i]->vertexCount) * 6, GL_UNSIGNED_INT, (void*)(sizeof(Vertex)));
it renders properly.

Polygon tearing in OpenGL

A 500x500 grid with 1000 subdivisions:
Just one question: why is this happening?
#include <iostream>
#include <sstream>
#include <vector>
#define GLEW_STATIC
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "glm/glm.hpp"
#include "glm/gtc/matrix_transform.hpp"
#include "GameEngine.hpp"
#include "ShaderProgram.h"
#include "Camera.h"
#include "Mesh.h"
const char *title = "Terrain";
GameEngine engine;
OrbitCamera orbitCamera;
float gYaw = 0.0f;
float gPitch = 1.0f;
float gRadius = 200.0f;
const float MOUSE_SENSTIVITY = 0.25f;
bool gWireFrame = false;
void glfw_onKey(GLFWwindow *window, int key, int scancode, int action, int mode);
void glfw_onMouseMove(GLFWwindow *window, double posX, double posY);
void glfw_onMouseScroll(GLFWwindow *window, double deltaX, double deltaY);
int main()
{
if (!engine.init(1024, 768, title))
{
std::cerr << "OpenGL init failed" << std::endl;
std::cin.get();
return -1;
}
//set callbacks
glfwSetKeyCallback(engine.getWindow(), glfw_onKey);
glfwSetCursorPosCallback(engine.getWindow(), glfw_onMouseMove);
std::vector<Vertex> VER;
std::vector<glm::vec3> verts;
std::vector<unsigned int> indices;
std::vector<glm::vec3> norms;
int subDiv = 1000;
int width = 500;
int height = 500;
int size = 0;
for (int row = 0; row < subDiv; row++)
{
for (int col = 0; col < subDiv; col++)
{
float x = (float)((col * width) / subDiv - (width / 2.0));
float z = ((subDiv - row) * height) / subDiv - (height / 2.0);
glm::vec3 pos = glm::vec3(x, 0, z);
verts.push_back(pos);
}
}
size = subDiv * subDiv;
size = verts.size();
for (int row = 0; row < subDiv -1 ; row++)
{
for (int col = 0; col < subDiv -1; col++)
{
int row1 = row * (subDiv);
int row2 = (row+1) * (subDiv);
indices.push_back(row1+col);
indices.push_back(row1+col+1);
indices.push_back( row2+col+1);
indices.push_back(row1+col);
indices.push_back( row2+col+1);
indices.push_back(row2+col);
}
}
for (int i = 0; i < verts.size(); i++)
{
Vertex vertex;
vertex.position = verts[i];
vertex.normal = glm::vec3(0, 0, 0);
vertex.texCoords = glm::vec2(0, 0);
VER.push_back(vertex);
}
VER.begin();
for (int i = 0; i < indices.size(); i += 3)
{
Vertex a = VER[indices[i]];
Vertex b = VER[indices[i + 1]];
Vertex c = VER[indices[i + 2]];
glm::vec3 p = glm::cross(b.position - a.position, c.position - a.position);
VER[indices[i]].normal += p;
VER[indices[i + 1]].normal += p;
VER[indices[i + 2]].normal += p;
}
for (int i = 0; i < VER.size(); i++)
{
VER[i].normal = glm::normalize(VER[i].normal);
}
glm::vec3 cubePos = glm::vec3(0.0f, 0.0f, -5.0f);
GLuint vbo, vao, ibo;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, VER.size() * sizeof(Vertex), &VER[0], GL_STATIC_DRAW);
// Vertex Positions
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)0);
glEnableVertexAttribArray(0);
// Normals attribute
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(3 * sizeof(GLfloat)));
glEnableVertexAttribArray(1);
// Vertex Texture Coords
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(6 * sizeof(GLfloat)));
glEnableVertexAttribArray(2);
int n = indices.size() * sizeof(unsigned int);
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned int), &indices[0], GL_STATIC_DRAW);
glBindVertexArray(0);
ShaderProgram shaderProgram;
shaderProgram.loadShaders("shaders/vert.glsl", "shaders/frag.glsl");
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
while (!glfwWindowShouldClose(engine.getWindow()))
{
glfwPollEvents();
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glm::mat4 model, view, projection;
model = glm::mat4(1.0f);
orbitCamera.setLookAt(glm::vec3(0, 0, 0));
orbitCamera.rotate(gYaw, gPitch);
orbitCamera.setRadius(gRadius);
model = glm::translate(model, glm::vec3(0, 0, 0));
//model = glm::scale(model, glm::vec3(1, 0, 1));
//model = scaleMat;
projection = glm::perspective(glm::radians(45.0f), (float)engine.getWidth() / (float)engine.getHeight(), 0.00001f, 100.0f);
shaderProgram.use();
glm::vec3 viewPos;
viewPos.x = orbitCamera.getPosition().x;
viewPos.y = orbitCamera.getPosition().y;
viewPos.z = orbitCamera.getPosition().z;
shaderProgram.setUniform("projection", projection);
shaderProgram.setUniform("view", orbitCamera.getViewMatrix());
shaderProgram.setUniform("model", model);
shaderProgram.setUniform("lightPos", glm::vec3(5, 10, 10));
shaderProgram.setUniform("viewPos", viewPos);
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES,indices.size(), GL_UNSIGNED_INT, 0);
//glDrawArrays(GL_TRIANGLES, 0, VER.size());
glBindVertexArray(0);
glfwSwapBuffers(engine.getWindow());
}
//cleanup
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(1, &vbo);
glfwTerminate();
return 0;
}
void glfw_onKey(GLFWwindow *window, int key, int scancode, int action, int mode)
{
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
{
glfwSetWindowShouldClose(window, GL_TRUE);
}
if (key == GLFW_KEY_E && action == GLFW_PRESS)
{
gWireFrame = !gWireFrame;
if (gWireFrame)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
else
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
}
}
void glfw_onMouseMove(GLFWwindow *window, double posX, double posY)
{
static glm::vec2 lastMousePos = glm::vec2(0, 0);
if (glfwGetMouseButton(engine.getWindow(), GLFW_MOUSE_BUTTON_LEFT) == 1)
{
gYaw -= ((float)posX - lastMousePos.x) * MOUSE_SENSTIVITY;
gPitch += ((float)posY - lastMousePos.y) * MOUSE_SENSTIVITY;
}
if (glfwGetMouseButton(engine.getWindow(), GLFW_MOUSE_BUTTON_RIGHT) == 1)
{
float dx = 0.01f * ((float)posX - lastMousePos.x);
float dy = 0.01f * ((float)posY - lastMousePos.y);
gRadius += dx - dy;
}
lastMousePos.x = (float)posX;
lastMousePos.y = (float)posY;
}
This is the main code. The rest is just basic initialisation code, nothing fancy.
I've tried changing the swap interval, but that doesn't seem to be the problem.
I can share the code for the other classes if anyone wants to take a look. I've also tried lowering the subdivisions.
EDIT: After increasing the value of the far plane to 8000:
Still not crisp.
The edit with the second image is telling you what is happening: if tampering with znear/zfar changes the output like that, it means your depth buffer has too low a bit width for the range you want to use.
However, increasing zfar should normally make things worse (for some reason you just don't see it; maybe it is cut off, or it hits some weird math-accuracy singularity).
For me it is usual to select the planes so that:
zfar/znear < (2^depth_buffer_bitwidth)/2
For example, with znear = 0.00001 and zfar = 100 (as in your code) the ratio is 10^7, which exceeds even 2^24/2 ≈ 8.4 million. Check your depth_buffer_bitwidth.
Try to use 24 bits (you might have 16 bits right now); that should work on all gfx cards these days. You can try 32 bits too, but that will only work on newer cards. I am using this code to get the maximum I can:
What is the proper OpenGL initialisation on Intel HD 3000?
However, you are using GLFW, so you need to find out how to request it there; there is probably a hint for it.
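For example, with GLFW the depth-buffer size is requested through a window hint; a minimal sketch, assuming it is placed next to your other window hints (presumably inside GameEngine::init()) before the window is created:
glfwWindowHint(GLFW_DEPTH_BITS, 24); // must be called before glfwCreateWindow()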
Increase znear as much as you can.
Tampering with znear has much, much more impact than zfar.
Use a linear depth buffer.
This is the best option for large depth-range views like terrains that cover stuff across the whole visible depth range. See:
How to correctly linearize depth in OpenGL ES in iOS?
However, you need shaders and the new API for this; I do not think it is doable in the old API, but luckily you are on the new API already. A minimal sketch follows.
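The sketch assumes the vertex shader passes the view-space position as vViewPos and that a uFar uniform holds the far-plane distance; note that writing gl_FragDepth disables early-z optimisations, so it costs some performance:
const char* linearDepthFrag = R"(
#version 330
in vec3 vViewPos;          // view-space position from the vertex shader
out vec4 outColour;
uniform float uFar;        // far-plane distance
void main()
{
    // store depth linearly in [0,1] instead of the default hyperbolic mapping
    gl_FragDepth = -vViewPos.z / uFar;
    outColour = vec4(1.0);
}
)";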
If none of the above is enough, you can stack up more frustums together, at the cost of rendering the same geometry multiple times. For more info see:
Is it possible to make realistic n-body solar system simulation in matter of size and mass?
How do you initialize OpenGL?
Are you using GL_BLEND?
Using blending is nice for getting anti-aliased polygon edges; however, it also means your z-buffer gets updated even when a very translucent fragment is drawn. This prevents other opaque fragments with the same z-depth from being drawn, which might be what is causing those holes. You could try disabling GL_BLEND to see if the issue goes away.
What depth function are you using?
By default it is set to GL_LESS. You might want to try glDepthFunc(GL_LEQUAL), so that fragments with the same z-depth are drawn. However, due to rounding errors this might not solve your problem entirely.
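A quick way to test both suggestions; a sketch, assuming the calls go into your GL initialisation:
glDisable(GL_BLEND);     // rule out translucent fragments writing depth and blocking opaque ones
glDepthFunc(GL_LEQUAL);  // let fragments with equal depth pass the depth test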

How should I be setting my Attribute Pointers for my text?

I've defined a block of vertex data like so:
struct vertex {
float x, y, u, v;
};
struct vertex_group {
vertex tl, bl, br, tr;
glm::vec4 color;
vertex_group(float x, float y, float width, float height, glm::vec4 c) {
tl.x = x; tl.y = y + height; tl.u = 0; tl.v = 0;
bl.x = x; bl.y = y; bl.u = 0; bl.v = 1;
br.x = x + width; br.y = y; br.u = 1; br.v = 1;
tr.x = x + width; tr.y = y + height; tr.u = 1; tr.v = 0;
color = c;
}
vertex_group(positioned_letter const& l) :
vertex_group(l.x, l.y, l.width, l.height, l.l.color) {
}
const float * data() const {
return &tl.x;
}
};
The attribute pointers are set like this:
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, (void*)(4 * 4 * sizeof(GLfloat)));
And the draw code is invoked like so:
vertex_group vertices(l);
glBindTexture(GL_TEXTURE_2D, g.texture);
glBindBuffer(GL_ARRAY_BUFFER, objects.rect_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices.data(), GL_STREAM_DRAW);
glDrawArrays(GL_QUADS, 0, 4);
The basic idea is that all four vertices of the quad should use the same data for color, even though they need different values for position and texture coordinates. However, when I set the color to red (1, 0, 0, 1), the results on screen are... not quite right.
Just for reference's sake, if the only changes I make are to the first two sections of code, changing them to the following:
struct vertex {
float x, y, u, v;
};
struct vertex_group {
vertex tl;
glm::vec4 color1;
vertex bl;
glm::vec4 color2;
vertex br;
glm::vec4 color3;
vertex tr;
glm::vec4 color4;
vertex_group(float x, float y, float width, float height, glm::vec4 c) {
tl.x = x; tl.y = y + height; tl.u = 0; tl.v = 0;
bl.x = x; bl.y = y; bl.u = 0; bl.v = 1;
br.x = x + width; br.y = y; br.u = 1; br.v = 1;
tr.x = x + width; tr.y = y + height; tr.u = 1; tr.v = 0;
color1 = color2 = color3 = color4 = c;
}
vertex_group(positioned_letter const& l) :
vertex_group(l.x, l.y, l.width, l.height, l.l.color) {
}
const float * data() const {
return &tl.x;
}
};
(other part)
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 8 * sizeof(GLfloat), (void*)(4 * sizeof(GLfloat)));
It renders correctly:
So in a nutshell, my question is: I'd like to structure my data (and render with it) like xyuvxyuvxyuvxyuvrgba but the only way I can get it to work is by doing xyuvrgbaxyuvrgbaxyuvrgbaxyuvrgba. How do I set my pointers/call the draw function so that I can use the first method?
You can't do that. But you can achieve this layout using instanced rendering:
xyuvxyuvxyuvxyuv // <- only once
whrgbawhrgbawhrgbawhrgba... // <- repeated per glyph
where w and h are the sizes of each quad that are to be applied in the vertex shader.
Here I split it into two buffers, but you can technically load it all into one buffer. I also use the OpenGL 4.5 direct state access (DSA) API here because I think it is easier to use. If you don't have it yet, you can change it to use the older bind-based calls accordingly.
float quad[] = {
0, 1, 0, 0,
0, 0, 0, 1,
1, 1, 1, 0,
1, 0, 1, 1,
};
struct Instance {
vec2 size;
// TODO: add index of the glyph you want to render
vec4 color;
};
Instance inst[] = { ... };
int ninst = sizeof(inst)/sizeof(inst[0]);
GLuint quad_buf = ... create buffer from quad[] ...;
GLuint inst_buf = ... create buffer from inst[] ...;
GLuint vao;
glCreateVertexArrays(1, &vao);
glEnableVertexArrayAttrib(vao, 0);
glVertexArrayAttribFormat(vao, 0, 4, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(vao, 0, 0); // from 0th buffer
glEnableVertexArrayAttrib(vao, 1);
glVertexArrayAttribFormat(vao, 1, 2, GL_FLOAT, GL_FALSE, offsetof(Instance, size));
glVertexArrayAttribBinding(vao, 1, 1); // from 1st buffer
glEnableVertexArrayAttrib(vao, 2);
glVertexArrayAttribFormat(vao, 2, 4, GL_FLOAT, GL_FALSE, offsetof(Instance, color));
glVertexArrayAttribBinding(vao, 2, 1); // from 1st buffer
glVertexArrayVertexBuffer(vao, 0, quad_buf, 0, sizeof(float)*4); // 0th buffer is the quad
glVertexArrayVertexBuffer(vao, 1, inst_buf, 0, sizeof(Instance)); // 1th buffer for instances
glVertexArrayBindingDivisor(vao, 1, 1); // 1st buffer advances once per instance
// to draw:
glBindTexture(...);
glBindVertexArray(vao);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ninst);
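For completeness, a hypothetical vertex shader matching this layout: attribute 0 is the shared unit-quad corner, while attributes 1 and 2 advance once per instance (a real one would also add a per-instance origin and select the glyph's texture coordinates):
const char* glyphVertexShader = R"(
#version 450
layout(location = 0) in vec4 corner;   // unit-quad corner: x, y in [0,1] plus u, v
layout(location = 1) in vec2 size;     // per-instance quad size (w, h)
layout(location = 2) in vec4 color;    // per-instance colour
out vec2 uv;
out vec4 fragColor;
void main()
{
    // scale the unit quad by the per-instance size; a per-instance origin would be added here too
    gl_Position = vec4(corner.xy * size, 0.0, 1.0);
    uv = corner.zw;
    fragColor = color;
}
)";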

Instantiation order changing draw in OpenGL using VAO

I'm trying to use VAOs, VBOs and IBOs to draw a bunch of spheres over a plane. Before using these, everything was drawn as expected; after I started using them, things got weird. I can't post my whole code here because I have 5 classes (but if necessary I can provide a link to the code), so I'll try to post what I think is useful.
With this class I can draw a sphere:
SphereShaderProgram::SphereShaderProgram(std::string vertexShaderPath, std::string fragmentShaderPath) : ProgramManager(vertexShaderPath, fragmentShaderPath)
{
_sphereH = 20;
_sphereW = 20;
_vbo = 0;
_vao = 0;
_ibo = 0;
CreateProgram();
BuildSphere();
BuildVAO();
}
SphereShaderProgram::~SphereShaderProgram()
{
glDeleteVertexArrays(1, &_vao);
glDeleteBuffers(1, &_vbo);
glDeleteBuffers(1, &_ibo);
}
void SphereShaderProgram::DrawSphere(const glm::mat4 &Projection, const glm::mat4 &ModelView)
{
_ModelViewProjection = Projection * ModelView;
_ModelView = ModelView;
Bind(); //glUseProgram
glBindVertexArray(_vao);
LoadVariables();
glDrawElements(GL_TRIANGLES, _sphereIndexes.size(), GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
UnBind();
}
int SphereShaderProgram::Get1DIndex(int line, int column)
{
return line * (int) _sphereH + column;
}
void SphereShaderProgram::BuildSphere()
{
for (int l = 0; l < _sphereH - 1; l++)
{
for (int c = 0; c < _sphereW - 1; c++)
{
int v1_1 = Get1DIndex(l, c);
int v2_1 = Get1DIndex(l + 1, c + 1);
int v3_1 = Get1DIndex(l + 1, c);
int v1_2 = Get1DIndex(l, c);
int v2_2 = Get1DIndex(l, c + 1);
int v3_2 = Get1DIndex(l + 1, c + 1);
_sphereIndexes.push_back(v1_1);
_sphereIndexes.push_back(v2_1);
_sphereIndexes.push_back(v3_1);
_sphereIndexes.push_back(v1_2);
_sphereIndexes.push_back(v2_2);
_sphereIndexes.push_back(v3_2);
}
}
for (int l = 0; l < _sphereH; l++)
{
for (int c = 0; c < _sphereW; c++)
{
float theta = ((float) l / (_sphereH - 1)) * (float) PI;
float phi = ((float) c / (_sphereW - 1)) * 2 * (float) PI;
float x = sin(theta) * cos(phi);
float z = sin(theta) * sin(phi);
float y = cos(theta);
_sphereCoordinates.push_back(x);
_sphereCoordinates.push_back(y);
_sphereCoordinates.push_back(z);
}
}
}
void SphereShaderProgram::BuildVAO()
{
// Generate and bind the vertex array object
glGenVertexArrays(1, &_vao);
glBindVertexArray(_vao);
// Generate and bind the vertex buffer object
glGenBuffers(1, &_vbo);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glBufferData(GL_ARRAY_BUFFER, _sphereCoordinates.size() * sizeof(float), &_sphereCoordinates[0], GL_STATIC_DRAW);
// Generate and bind the index buffer object
glGenBuffers(1, &_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, _sphereIndexes.size() * sizeof(unsigned int), &_sphereIndexes[0], GL_STATIC_DRAW);
glBindVertexArray(0);
}
void SphereShaderProgram::LoadUniformVariables()
{
glm::mat4 MVP = _ModelViewProjection;
glm::mat4 MV = _ModelView;
glm::mat3 N = glm::transpose(glm::inverse(glm::mat3(MV)));
glm::vec4 AC = glm::vec4(0.2, 0.2, 0.2, 1.0);
glm::vec4 DC = glm::vec4(0.7, 0.0, 0.0, 1.0);
glm::vec4 SC = glm::vec4(0.1, 0.1, 0.1, 1.0);
glm::vec3 LP = glm::vec3(1.0, 6.0, 4.0);
// OpenGL Matrices
GLuint ModelViewProjection_location = glGetUniformLocation(GetProgramID(), "mvpMatrix");
glUniformMatrix4fv(ModelViewProjection_location, 1, GL_FALSE, glm::value_ptr(MVP));
GLuint ModelView_location = glGetUniformLocation(GetProgramID(), "mvMatrix");
glUniformMatrix4fv(ModelView_location, 1, GL_FALSE, glm::value_ptr(MV));
GLuint Normal_location = glGetUniformLocation(GetProgramID(), "normalMatrix");
glUniformMatrix3fv(Normal_location, 1, GL_FALSE, glm::value_ptr(N));
// Lighting
GLuint AmbientColor_location = glGetUniformLocation(GetProgramID(), "ambientColor");
glUniform4fv(AmbientColor_location, 1, glm::value_ptr(AC));
GLuint DiffuseColor_location = glGetUniformLocation(GetProgramID(), "diffuseColor");
glUniform4fv(DiffuseColor_location, 1, glm::value_ptr(DC));
GLuint SpecularColor_location = glGetUniformLocation(GetProgramID(), "specularColor");
glUniform4fv(SpecularColor_location, 1, glm::value_ptr(SC));
GLuint LightPosition_location = glGetUniformLocation(GetProgramID(), "vLightPosition");
glUniform3fv(LightPosition_location, 1, glm::value_ptr(LP));
}
void SphereShaderProgram::LoadAtributeVariables()
{
// Vertex Attributes
GLuint VertexPosition_location = glGetAttribLocation(GetProgramID(), "vPosition");
glEnableVertexAttribArray(VertexPosition_location);
glVertexAttribPointer(VertexPosition_location, 3, GL_FLOAT, GL_FALSE, 0, 0);
}
void SphereShaderProgram::LoadVariables()
{
LoadUniformVariables();
LoadAtributeVariables();
}
And with that, a plane:
PlaneShaderProgram::PlaneShaderProgram(std::string vertexShaderPath, std::string fragmentShaderPath) : ProgramManager(vertexShaderPath, fragmentShaderPath)
{
CreateProgram();
_vbo = 0;
_vao = 0;
_ibo = 0;
BuildPlane();
BuildVAO();
}
PlaneShaderProgram::~PlaneShaderProgram()
{
glDeleteVertexArrays(1, &_vao);
glDeleteBuffers(1, &_vbo);
glDeleteBuffers(1, &_ibo);
}
void PlaneShaderProgram::DrawPlane(const glm::mat4 &Projection, const glm::mat4 &ModelView)
{
_ModelViewProjection = Projection * ModelView;
_ModelView = ModelView;
Bind();
glBindVertexArray(_vao);
LoadVariables();
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
UnBind();
}
void PlaneShaderProgram::BuildPlane()
{
_coordinates[0] = -1.0f;
_coordinates[1] = 0.0f;
_coordinates[2] = -1.0f;
_coordinates[3] = -1.0f;
_coordinates[4] = 0.0f;
_coordinates[5] = 1.0f;
_coordinates[6] = 1.0f;
_coordinates[7] = 0.0f;
_coordinates[8] = 1.0f;
_coordinates[9] = 1.0f;
_coordinates[10] = 0.0f;
_coordinates[11] = -1.0f;
_indexes[0] = 0;
_indexes[1] = 1;
_indexes[2] = 2;
_indexes[3] = 0;
_indexes[4] = 2;
_indexes[5] = 3;
}
void PlaneShaderProgram::BuildVAO()
{
// Generate and bind the vertex array object
glGenVertexArrays(1, &_vao);
glBindVertexArray(_vao);
// Generate and bind the vertex buffer object
glGenBuffers(1, &_vbo);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glBufferData(GL_ARRAY_BUFFER, 12 * sizeof(GLfloat), _coordinates, GL_STATIC_DRAW);
// Generate and bind the index buffer object
glGenBuffers(1, &_ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, 6 * sizeof(GLuint), _indexes, GL_STATIC_DRAW);
glBindVertexArray(0);
}
void PlaneShaderProgram::LoadUniformVariables()
{
// OpenGL Matrices
GLuint ModelViewProjection_location = glGetUniformLocation(GetProgramID(), "mvpMatrix");
glUniformMatrix4fv(ModelViewProjection_location, 1, GL_FALSE, glm::value_ptr(_ModelViewProjection));
}
void PlaneShaderProgram::LoadAtributeVariables()
{
// Vertex Attributes
GLuint VertexPosition_location = glGetAttribLocation(GetProgramID(), "vPosition");
glEnableVertexAttribArray(VertexPosition_location);
glVertexAttribPointer(VertexPosition_location, 3, GL_FLOAT, GL_FALSE, 0, 0);
}
void PlaneShaderProgram::LoadVariables()
{
LoadUniformVariables();
LoadAtributeVariables();
}
This, on the other hand, is my main:
int main(void)
{
// Set the error callback
glfwSetErrorCallback(ErrorCallback);
// Initialize GLFW
if (!glfwInit())
{
printf("Error initializing GLFW!\n");
exit(EXIT_FAILURE);
}
// Set the GLFW window creation hints - these are optional
glfwWindowHint(GLFW_SAMPLES, 4);
// Create a window and create its OpenGL context
GLFWwindow* window = glfwCreateWindow(width, height, "OpenGL 4 Base", NULL, NULL);
// If the window couldn't be created
if (!window)
{
fprintf(stderr, "Failed to open GLFW window.\n");
glfwTerminate();
exit(EXIT_FAILURE);
}
// Sets the context of the specified window on the calling thread
glfwMakeContextCurrent(window);
// Initialize GLEW
glewExperimental = true;
GLenum glewError = glewInit();
if (glewError != GLEW_OK)
{
printf("Error initializing GLEW! %s\n", glewGetErrorString(glewError));
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwSetKeyCallback(window, KeyCallback);
glfwSetWindowSizeCallback(window, WindowSizeCallback);
glfwSetScrollCallback(window, ScrollCallback);
// Set the view matrix
glm::mat4 ModelView = glm::lookAt(glm::vec3(0.0f, 7.0f, 15.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));
// Init matrix stack
glm_ModelViewMatrix.push(ModelView);
PlaneShaderProgram PlaneShaderProgram("FloorVertexShader.txt", "FloorFragShader.txt");
SphereShaderProgram SphereShaderProgram("ADSPerVertexVertexShader.txt", "ADSPerVertexFragShader.txt");
//SphereShaderProgram SphereShaderProgram = SphereShaderProgram("ADSPerPixelVertexShader.txt", "ADSPerPixelFragShader.txt");
// Set a background color
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
// 3D objects
glEnable(GL_DEPTH_TEST);
float d = 2.0f;
float p0 = -10.0f + d / 2;
// Main Loop
while (!glfwWindowShouldClose(window))
{
// Clear color buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Clone current modelview matrix, which can now be modified
glm_ModelViewMatrix.push(glm_ModelViewMatrix.top());
{
//------- ModelView Transformations
// Zoom in/out
glm_ModelViewMatrix.top() = glm::translate(glm_ModelViewMatrix.top(), glm::vec3(0.0, 0.0, zoom));
// Rotation
glm_ModelViewMatrix.top() = glm::rotate(glm_ModelViewMatrix.top(), beta, glm::vec3(1.0, 0.0, 0.0));
glm_ModelViewMatrix.top() = glm::rotate(glm_ModelViewMatrix.top(), alpha, glm::vec3(0.0, 0.0, 1.0));
//------- Draw the plane
glm_ModelViewMatrix.push(glm_ModelViewMatrix.top());
{
glm_ModelViewMatrix.top() = glm::scale(glm_ModelViewMatrix.top(), glm::vec3(7.0f, 1.0f, 7.0f));
PlaneShaderProgram.DrawPlane(Projection, glm_ModelViewMatrix.top());
}
glm_ModelViewMatrix.pop();
//------- Draw spheres
for (int i = 0; i < 10; i++)
{
for (int j = 0; j < 10; j++)
{
glm_ModelViewMatrix.push(glm_ModelViewMatrix.top());
{
glm_ModelViewMatrix.top() = glm::scale(glm_ModelViewMatrix.top(), glm::vec3(0.5f, 0.5f, 0.5f));
glm_ModelViewMatrix.top() = glm::translate(glm_ModelViewMatrix.top(), glm::vec3(p0 + i * d, 1.0f, p0 + j * d));
SphereShaderProgram.DrawSphere(Projection, glm_ModelViewMatrix.top());
}
glm_ModelViewMatrix.pop();
}
}
}
glm_ModelViewMatrix.pop();
// Swap buffers
glfwSwapBuffers(window);
// Get and organize events, like keyboard and mouse input, window resizing, etc...
glfwPollEvents();
}
// Close OpenGL window and terminate GLFW
glfwDestroyWindow(window);
// Finalize and clean up GLFW
glfwTerminate();
exit(EXIT_SUCCESS);
}
Instantiating the plane and then the sphere program, I get the following result (no plane at all):
Changing the order, this is the result:
I'm trying to find a clue about what I'm missing, because I have no idea what is wrong. Before using VAOs (just using glVertexAttribPointer and glDrawElements), everything was drawn correctly.
Thank you in advance.
The problem is with the placement of the glVertexAttribPointer() call. You're calling it in the LoadAtributeVariables() method, which in turn is called from the Draw*() method.
This should really be part of the VAO setup, for a couple of reasons:
It's inefficient to make the call on every redraw. This call sets up state that is part of the VAO state. That's the whole idea of using VAOs in the first place. You can set up all this state once during setup, and then only need to bind the VAO again before the draw call, which sets up all the state again with a single call.
In your case, the VBO is not bound at the time you make the call. glVertexAttribPointer() sets up the attribute to pull data from the currently bound VBO, i.e. the buffer bound as GL_ARRAY_BUFFER.
The first problem is only a performance issue. The second is the reason why your code does not work, since you do not have the correct VBO bound when glVertexAttribPointer() is called.
To fix this, you only need to move the LoadAtributeVariables() call into BuildVAO(), at this location:
// Generate and bind the vertex buffer object
glGenBuffers(1, &_vbo);
glBindBuffer(GL_ARRAY_BUFFER, _vbo);
glBufferData(GL_ARRAY_BUFFER, _sphereCoordinates.size() * sizeof(float), &_sphereCoordinates[0], GL_STATIC_DRAW);
LoadAtributeVariables();
and remove it from where it currently is, so that it is not called before each draw call anymore.
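With the attribute setup captured in the VAO, the per-frame draw only needs to bind the VAO and upload the uniforms; a sketch of what DrawSphere() then reduces to (the same applies to the plane program):
void SphereShaderProgram::DrawSphere(const glm::mat4 &Projection, const glm::mat4 &ModelView)
{
    _ModelViewProjection = Projection * ModelView;
    _ModelView = ModelView;
    Bind();                  // glUseProgram
    glBindVertexArray(_vao); // restores the attribute state recorded in BuildVAO()
    LoadUniformVariables();  // uniforms still change every frame
    glDrawElements(GL_TRIANGLES, _sphereIndexes.size(), GL_UNSIGNED_INT, 0);
    glBindVertexArray(0);
    UnBind();
}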

Screen corruption when using OpenGL buffers

I'm writing an OpenGL game, but I have some problems using OpenGL buffers.
My old code that works (but has high CPU consumption and low fps) looks like this:
void Terrain::drawObject(sf::RenderWindow* window)
{
float scale = 5.0f / max(width_ - 1, length_ - 1);
glScalef(scale, scale, scale);
glTranslatef(-(float) (width_ - 1) / 2, 0.0f, -(float) (length_ - 1) / 2);
bool texture = true;
for (int z = 0; z < width_ - 1; z++) {
//Makes OpenGL draw a triangle at every three consecutive vertices
if (getHeight(0, z) > 15)
{
glBindTexture(GL_TEXTURE_2D, textures_.find(Layer::High)->second);
}
else
{
glBindTexture(GL_TEXTURE_2D, textures_.find(Layer::Mid)->second);
}
glBegin(GL_TRIANGLE_STRIP);
for (int x = 0; x < width_; x++) {
sf::Vector3f normal = getNormal(x, z);
glNormal3f(normal.x, normal.y, normal.z);
if (texture)
{
glTexCoord2f(0, 0);
}
else
{
glTexCoord2f(1, 0);
}
glVertex3f((GLfloat) x, (GLfloat) getHeight(x, z), (GLfloat) z);
normal = getNormal(x, z + 1);
glNormal3f(normal.x, normal.y, normal.z);
if (texture)
{
glTexCoord2f(0, 1);
texture = !texture;
}
else
{
glTexCoord2f(1, 1);
texture = !texture;
}
glVertex3f((GLfloat) x, (GLfloat) getHeight(x, z + 1), (GLfloat) z + 1);
}
glEnd();
}
}
Now I have changed my code to get higher fps by using OpenGL buffers, but when I use them everything on the screen is corrupted. I use the following source code now:
void Terrain::drawObject(sf::RenderWindow* window)
{
if (!buffersCreated_)
{
createBuffers();
buffersCreated_ = true;
}
float scale = 5.0f / max(width_ - 1, length_ - 1);
glScalef(scale, scale, scale);
glTranslatef(-(float) (width_ - 1) / 2, 0.0f, -(float) (length_ - 1) / 2);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, textures_.find(Layer::Mid)->second);
glBindBuffer(GL_ARRAY_BUFFER, textCoordBuffer_);
glTexCoordPointer(2, GL_FLOAT, 0, (char *) NULL);
glEnableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_);
glNormalPointer(GL_FLOAT, 0, (char *) NULL);
glEnableClientState(GL_VERTEX_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_);
glVertexPointer(3, GL_FLOAT, 0, (char *) NULL);
glDrawArrays(GL_TRIANGLE_STRIP, 0, vhVertexCount);
glDisableClientState(GL_VERTEX_ARRAY);
glDisable(GL_TEXTURE_2D);
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
}
void Terrain::createBuffers()
{
vhVertexCount = (int) (width_ * length_ * 6) / (1 * 1);
sf::Vector3f* vhVertices = new sf::Vector3f[vhVertexCount];
sf::Vector3f* vhNormal = new sf::Vector3f[vhVertexCount];
sf::Vector2i* vhTexCoords = new sf::Vector2i[vhVertexCount];
bool texture = true;
int nIndex = 0;
for (int z = 0; z < length_ - 1; z++) {
for (int x = 0; x < width_; x++) {
sf::Vector3f normal = getNormal(x, z);
if (texture)
{
vhTexCoords[nIndex] = sf::Vector2i(0, 0);
}
else
{
vhTexCoords[nIndex] = sf::Vector2i(1, 0);
}
vhVertices[nIndex] = sf::Vector3f((float) x, getHeight(x, z), (float) z);
vhNormal[nIndex] = sf::Vector3f(normal.x, normal.y, normal.z);
nIndex++;
normal = getNormal(x, z + 1);
if (texture)
{
vhTexCoords[nIndex] = sf::Vector2i(0, 1);
}
else
{
vhTexCoords[nIndex] = sf::Vector2i(1, 1);
}
vhVertices[nIndex] = sf::Vector3f((float) x, getHeight(x, z + 1), (float) z + 1);
vhNormal[nIndex] = sf::Vector3f(normal.x, normal.y, normal.z);
nIndex++;
}
}
glGenBuffers(1, &vertexBuffer_);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer_);
glBufferData(GL_ARRAY_BUFFER, vhVertexCount * sizeof(sf::Vector3f), vhVertices, GL_STATIC_DRAW);
glGenBuffers(1, &normalBuffer_);
glBindBuffer(GL_ARRAY_BUFFER, normalBuffer_);
glBufferData(GL_ARRAY_BUFFER, vhVertexCount * sizeof(sf::Vector3f), vhNormal, GL_STATIC_DRAW);
glGenBuffers(1, &textCoordBuffer_);
glBindBuffer(GL_ARRAY_BUFFER, textCoordBuffer_);
glBufferData(GL_ARRAY_BUFFER, vhVertexCount * sizeof(sf::Vector2i), vhTexCoords, GL_STATIC_DRAW);
delete [] vhVertices;
vhVertices = nullptr;
delete [] vhNormal;
vhNormal = nullptr;
delete [] vhTexCoords;
vhTexCoords = nullptr;
}
I use SFML to create the window and render 2D stuff like the menu in the lower left corner.
The code that renders the SFML stuff together with the OpenGL stuff looks like this:
void GameEngine::gameDraw()
{
// Clear the depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
if (camera_ != nullptr)
{
camera_->drawCamera();
}
openglObjectsMutex_.lock();
for (OpenglObject* openglObject : openglObjects_)
{
openglObject->drawObject(window_);
}
openglObjectsMutex_.unlock();
window_->pushGLStates();
sfmlObjectsMutex_.lock();
for (SfmlObject * gameObject : sfmlObjects_)
{
gameObject->drawObject(window_);
}
sfmlObjectsMutex_.unlock();
window_->popGLStates();
}
Can someone find any problems with the buffer code?
The above image is the correct one but with low fps.
After changing the source to use buffers, I got the image below.
SFML can only save/restore OpenGL 2.x states, so we must disable/unbind ourselves whatever OpenGL 3.x+ state we enable. It's fixed by adding something like this at the end of our own drawing, right before pushGLStates():
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
glBindTexture( GL_TEXTURE_2D, 0 );
glDisableVertexAttribArray( 0 );
glUseProgram( 0 );
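In the gameDraw() shown above, the reset sits between the OpenGL object loop and pushGLStates(); roughly (a sketch):
openglObjectsMutex_.unlock();
// undo modern-GL state that SFML's pushGLStates()/popGLStates() does not know about
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glDisableVertexAttribArray(0);
glUseProgram(0);
window_->pushGLStates();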

OpenGL: Terrain not drawing (heightmap)

EDIT: I'm thinking the problem might be when I'm loading the vertices and indices. Maybe focus on that section :)
I'm trying to load a heightmap from a bmp file and display it in OpenGL. As with most things I try, everything compiles and runs without errors, but nothing is drawn on the screen. I can't seem to isolate the issue much, since all the code works on its own, but when combined to draw the terrain, nothing works.
Terrain class
I have a terrain class. It has 2 VBOs, 1 IBO and 1 VAO. It also stores the vertices, indices, colours of the vertices and the heights. It is loaded from a bmp file.
Loading terrain:
Terrain* Terrain::loadTerrain(const std::string& filename, float height)
{
BitMap* bmp = BitMap::load(filename);
Terrain* t = new Terrain(bmp->width, bmp->length);
for(unsigned y = 0; y < bmp->length; y++)
{
for(unsigned x = 0; x < bmp->width; x++)
{
unsigned char color =
(unsigned char)bmp->pixels[3 * (y * bmp->width + x)];
float h = height * ((color / 255.0f) - 0.5f);
t->setHeight(x, y, h);
}
}
delete bmp;
t->initGL();
return t;
}
Initializing the buffers:
void Terrain::initGL()
{
// load vertices from heights data
vertices = new Vector4f[w * l];
int vertIndex = 0;
for(unsigned y = 0; y < l; y++)
{
for(unsigned x = 0; x < w; x++)
{
vertices[vertIndex++] = Vector4f((float)x, (float)y, heights[y][x], 1.0f);
}
}
// generate indices for indexed drawing
indices = new GLshort[(w - 1) * (l - 1) * 6]; // patch count * 6 (2 triangles per terrain patch)
int indicesIndex = 0;
for(unsigned y = 0; y < (l - 1); ++y)
{
for(unsigned x = 0; x < (w - 1); ++x)
{
int start = y * w + x;
indices[indicesIndex++] = (GLshort)start;
indices[indicesIndex++] = (GLshort)(start + 1);
indices[indicesIndex++] = (GLshort)(start + w);
indices[indicesIndex++] = (GLshort)(start + 1);
indices[indicesIndex++] = (GLshort)(start + 1 + w);
indices[indicesIndex++] = (GLshort)(start + w);
}
}
// generate colours for the vertices
colours = new Vector4f[w * l];
for(unsigned i = 0; i < w * l; i++)
{
colours[i] = Vector4f(0.0f, 1.0f, 0.0f, 1.0f); // let's make the entire terrain green
}
// THIS CODE WORKS FOR CUBES (BEGIN)
// vertex buffer object
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// index buffer object
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// colours vertex buffer object
glGenBuffers(1, &colour_vbo);
glBindBuffer(GL_ARRAY_BUFFER, colour_vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(colours), colours, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
// create vertex array object
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ARRAY_BUFFER, colour_vbo);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBindVertexArray(0);
// THIS CODE WORKS FOR CUBES (END)
}
The part where I create the VBOs, IBO and VAO works fine for cubes, they are drawn nicely.
Rendering terrain:
void Terrain::render()
{
glUseProgram(shaderProgram);
glBindVertexArray(vao);
int indices_length = (w - 1) * (l - 1) * 6;
glDrawElements(GL_TRIANGLES, indices_length, GL_UNSIGNED_SHORT, 0);
}
Shaders
These are the vertex and fragment shaders.
Vertex:
#version 330
layout (location = 0) in vec4 position;
layout (location = 1) in vec4 vertexColour;
out vec4 fragmentColour;
uniform vec3 offset;
uniform mat4 perspectiveMatrix;
void main()
{
vec4 cameraPos = position + vec4(offset.x, offset.y, offset.z, 0.0);
gl_Position = perspectiveMatrix * cameraPos;
fragmentColour = vertexColour;
}
Fragment:
#version 330
in vec4 fragmentColour;
out vec4 outputColour;
void main()
{
outputColour = fragmentColour;
}
Perspective matrix
Here are the settings for the "camera":
struct CameraSettings
{
static const float FRUSTUM_SCALE = 1.0f;
static const float Z_NEAR = 0.5f;
static const float Z_FAR = 3.0f;
static float perspective_matrix[16];
};
float CameraSettings::perspective_matrix[16] = {
FRUSTUM_SCALE,
0, 0, 0, 0,
FRUSTUM_SCALE,
0, 0, 0, 0,
(Z_FAR + Z_NEAR) / (Z_NEAR - Z_FAR),
-1.0f,
0, 0,
(2 * Z_FAR * Z_NEAR) / (Z_NEAR - Z_FAR),
0
};
The uniforms get filled in after initGL() is called:
// get offset uniform
offsetUniform = ShaderManager::getUniformLocation(shaderProgram, "offset");
perspectiveMatrixUniform = ShaderManager::getUniformLocation(shaderProgram, "perspectiveMatrix");
// set standard uniform data
glUseProgram(shaderProgram);
glUniform3f(offsetUniform, xOffset, yOffset, zOffset);
glUniformMatrix4fv(perspectiveMatrixUniform, 1, GL_FALSE, CameraSettings::perspective_matrix);
glUseProgram(0);
Could someone check out my code and give suggestions?
I'm pretty sure that when you say:
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
you actually want to say:
glBufferData(GL_ARRAY_BUFFER, sizeof(Vector4f) * w * l, vertices, GL_STATIC_DRAW);
(sizeof(vertices) is the size of the pointer, not of the array it points to; the same goes for the colour and index buffers.)
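Applying the same fix to the other two uploads in initGL() would look like this (a sketch; the sizes come from the element counts used when the arrays were allocated):
// colours: one Vector4f per vertex
glBufferData(GL_ARRAY_BUFFER, sizeof(Vector4f) * w * l, colours, GL_STATIC_DRAW);
// indices: 6 GLshorts per terrain patch
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLshort) * (w - 1) * (l - 1) * 6, indices, GL_STATIC_DRAW);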