std::vector and VBOs render only the last shape [duplicate] - c++

This question already has an answer here: Mesh class called with default constructor not working OpenGL C++ (1 answer). Closed 6 years ago.
I'm running into a weird problem. Basically, I have a Mesh class that, depending on a flag, draws a point, a line, or a triangle. For example, if I want to draw two lines, I can do the following:
Vertex vertices1[] = {
Vertex(glm::vec3(-.5, -.5, 0)),
Vertex(glm::vec3( 0, .5, 0))
};
Vertex vertices2[] = {
Vertex(glm::vec3( .5, -.5, 0)),
Vertex(glm::vec3( -.5, .5, 0))
};
Mesh mesh1(vertices1, sizeof(vertices1)/sizeof(vertices1[0]), 'L');
Mesh mesh2(vertices2, sizeof(vertices2)/sizeof(vertices2[0]), 'L');
// Rendering Loop:
while( Window.isOpen() ){
...
//================( Rendering )=========================
ourShader.Use();
mesh1.draw();
mesh2.draw();
//======================================================
...
}
The result is both lines drawn correctly (screenshot omitted).
Now I would like to use std::vector<Mesh> and loop through the meshes. My attempt is as follows:
std::vector<Mesh> meshes;
meshes.push_back(mesh1);
meshes.push_back(mesh2);
while( Window.isOpen() ){
...
//================( Rendering )=========================
ourShader.Use();
for ( int i(0); i < meshes.size(); ++i )
meshes[i].draw();
//======================================================
...
}
With the preceding approach, only the last line is drawn (screenshot omitted).
Moreover, once I use .push_back(), only the last line is drawn even if I never loop through the vector. I don't understand why using std::vector breaks the rendering. I even tried meshes[0].draw(), but with no luck. Any suggestions?
Edit:
Here is the complete code, including the Mesh class:
#include <iostream>
#include <vector>
#include <glm/glm.hpp>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "display.h"
#include "keyboard.h"
#include "shader.h"
class Vertex
{
public:
Vertex(const glm::vec3& p) : m_position(p)
{}
private:
glm::vec3 m_position;
};
class Mesh
{
public:
Mesh(Vertex* vertices, unsigned int numVertices, const char& flag);
~Mesh();
void draw();
private:
enum{
POSITION_VB,
NUM_BUFFERS
};
GLuint m_vertexArrayObject;
GLuint m_vertexArrayBuffers[NUM_BUFFERS];
unsigned int m_drawCount;
char m_flag;
};
Mesh::Mesh(Vertex* vertices, unsigned int numVertices, const char& flag) : m_flag(flag), m_drawCount(numVertices)
{
glGenVertexArrays(1, &m_vertexArrayObject);
glBindVertexArray(m_vertexArrayObject);
glGenBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexArrayBuffers[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, numVertices*sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
Mesh::~Mesh()
{
glDeleteVertexArrays(1, &m_vertexArrayObject);
glDeleteBuffers(1, m_vertexArrayBuffers);
}
void Mesh::draw()
{
switch(m_flag)
{
case 'P':
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_POINTS, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'L':
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_LINES, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'T':
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_TRIANGLES, 0, m_drawCount);
glBindVertexArray(0);
break;
}
}
int main(void)
{
Display Window(800, 600, "OpenGL Window");
Keyboard myKeyboard( Window.getWindowPointer() );
Vertex vertices1[] = {
Vertex(glm::vec3(-.5, -.5, 0)),
Vertex(glm::vec3( 0, .5, 0))
};
Vertex vertices2[] = {
Vertex(glm::vec3( .5, -.5, 0)),
Vertex(glm::vec3( -.5, .5, 0))
};
Mesh mesh1(vertices1, sizeof(vertices1)/sizeof(vertices1[0]), 'L');
Mesh mesh2(vertices2, sizeof(vertices2)/sizeof(vertices2[0]), 'L');
std::vector<Mesh> meshes;
meshes.emplace_back(mesh1);
meshes.emplace_back(mesh2);
std::cout << meshes.size() << std::endl;
//*****************( SHADER )************************
Shader ourShader("shader.vs", "shader.frag");
glEnable(GL_PROGRAM_POINT_SIZE);
while( Window.isOpen() ){
Window.PollEvents();
Window.clear();
//================( Rendering )=========================
ourShader.Use();
//mesh1.draw();
//mesh2.draw();
for ( int i(0); i < meshes.size(); ++i )
meshes[i].draw();
//meshes[0].draw();
//meshes[1].draw();
//======================================================
Window.SwapBuffers();
}
glfwTerminate();
return 0;
}
Shaders
Fragment shader (shader.frag):
#version 330 core
out vec4 color;
void main()
{
color = vec4(1.0f,0.5f,0.2f,1.0f);
}
Vertex shader (shader.vs):
#version 330 core
layout (location = 0) in vec3 position;
void main()
{
gl_PointSize = 10.0;
gl_Position = vec4(position, 1.0);
}

As I suspected, the problem is with the (lack of) copy constructor. The default one just copies all the members. As a result your VAOs and buffers get deleted multiple times, even before you manage to draw anything (vectors move their elements during reallocation, and if they can't move they copy). As a rule of thumb: if you have a non-default destructor, you must also implement a copy constructor and an assignment operator, or explicitly delete them if your class is not meant to be copyable.
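For illustration, here is a minimal sketch of that rule applied to the question's Mesh (the member names come from the question; the move operations are my addition, not part of the original answer):

class Mesh
{
public:
    Mesh(const Mesh&) = delete;            // owns GL objects, so forbid copies
    Mesh& operator=(const Mesh&) = delete;
    Mesh(Mesh&& other) noexcept            // transfer ownership of the GL names
        : m_vertexArrayObject(other.m_vertexArrayObject),
          m_drawCount(other.m_drawCount),
          m_flag(other.m_flag)
    {
        for (int i = 0; i < NUM_BUFFERS; ++i)
        {
            m_vertexArrayBuffers[i] = other.m_vertexArrayBuffers[i];
            other.m_vertexArrayBuffers[i] = 0;  // glDeleteBuffers silently ignores 0
        }
        other.m_vertexArrayObject = 0;          // glDeleteVertexArrays silently ignores 0
    }
    // ... constructor, destructor and draw() exactly as in the question ...
private:
    enum { POSITION_VB, NUM_BUFFERS };
    GLuint m_vertexArrayObject = 0;
    GLuint m_vertexArrayBuffers[NUM_BUFFERS] = {};
    unsigned int m_drawCount = 0;
    char m_flag = 0;
};

With the copies deleted, meshes.push_back(mesh1) no longer compiles; you would write meshes.push_back(std::move(mesh1)) instead, and the vector is free to move elements during reallocation because the move constructor is noexcept.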
For your concrete case the solutions are:
Quick solution: store pointers to meshes in the vector:
std::vector<Mesh*> meshes;
meshes.emplace_back(&mesh1);
meshes.emplace_back(&mesh2);
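The render loop then dereferences the pointers; just make sure mesh1 and mesh2 outlive the vector:

for ( int i(0); i < meshes.size(); ++i )
    meshes[i]->draw();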
Correct solution: use proper RAII for resource management. Using the unique_ptr technique from here, your MCVE code becomes:
class Mesh
{
public:
Mesh(Vertex* vertices, unsigned int numVertices, const char& flag);
void draw();
private:
//...
GLvertexarray m_vertexArrayObject;
GLbuffer m_vertexArrayBuffers[NUM_BUFFERS];
unsigned int m_drawCount;
char m_flag;
};
Mesh::Mesh(Vertex* vertices, unsigned int numVertices, const char& flag) : m_flag(flag), m_drawCount(numVertices)
{
GLuint id;
glGenVertexArrays(1, &id);
glBindVertexArray(id);
m_vertexArrayObject.reset(id);
for(int i = 0; i < NUM_BUFFERS; ++i)
{
glGenBuffers(1, &id);
glBindBuffer(GL_ARRAY_BUFFER, id);
m_vertexArrayBuffers[i].reset(id);
glBufferData(GL_ARRAY_BUFFER, numVertices*sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
}
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
void Mesh::draw()
{
switch(m_flag)
{
case 'P':
glBindVertexArray(m_vertexArrayObject.get());
glDrawArrays(GL_POINTS, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'L':
glBindVertexArray(m_vertexArrayObject.get());
glDrawArrays(GL_LINES, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'T':
glBindVertexArray(m_vertexArrayObject.get());
glDrawArrays(GL_TRIANGLES, 0, m_drawCount);
glBindVertexArray(0);
break;
}
}
int main()
{
//...
Mesh mesh1(vertices1, sizeof(vertices1)/sizeof(vertices1[0]), 'L');
Mesh mesh2(vertices2, sizeof(vertices2)/sizeof(vertices2[0]), 'L');
std::vector<Mesh> meshes;
meshes.emplace_back(std::move(mesh1));
meshes.emplace_back(std::move(mesh2));
// ...
return 0;
}
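For reference, GLvertexarray and GLbuffer are not standard OpenGL types; they come from the linked unique_ptr technique. One possible implementation looks like this (a sketch; the linked code may differ in details):

#include <cstddef>
#include <memory>
#include <GL/glew.h>

// A GLuint wrapped so that it satisfies unique_ptr's NullablePointer requirements.
struct GLuintHandle
{
    GLuint id = 0;
    GLuintHandle(std::nullptr_t = nullptr) {}
    GLuintHandle(GLuint i) : id(i) {}
    operator GLuint() const { return id; }    // lets .get() feed glBind* directly
    explicit operator bool() const { return id != 0; }
    friend bool operator==(GLuintHandle a, GLuintHandle b) { return a.id == b.id; }
    friend bool operator!=(GLuintHandle a, GLuintHandle b) { return a.id != b.id; }
};

struct GLvertexarrayDeleter
{
    using pointer = GLuintHandle;             // unique_ptr stores this instead of GLuint*
    void operator()(GLuintHandle h) const { glDeleteVertexArrays(1, &h.id); }
};
struct GLbufferDeleter
{
    using pointer = GLuintHandle;
    void operator()(GLuintHandle h) const { glDeleteBuffers(1, &h.id); }
};

using GLvertexarray = std::unique_ptr<GLuint, GLvertexarrayDeleter>;
using GLbuffer      = std::unique_ptr<GLuint, GLbufferDeleter>;

Because unique_ptr only invokes the deleter for a non-null handle, a moved-from member holding name 0 is never deleted, which is exactly the single-ownership behavior the Mesh above relies on.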
Notice that there is no longer any need to define a destructor, and your class automatically becomes movable but not copyable. Furthermore, if you have OpenGL 4.5 or ARB_direct_state_access then things get even simpler.

EDIT
The main problem is that the destructor is called when you add the Mesh objects to the vector (the vector stores copies), and therefore the underlying data gets cleaned up.
Further reading: Why does my class's destructor get called when I add instances to a vector? | What is The Rule of Three?
I'd personally create separate init_buffers and free_buffers methods in my Mesh class and use them appropriately. (Initialize the buffers after the OpenGL context is obtained; free the buffers when the window is closed.)
This way you can start building meshes (and add them to the scene) before actually having the OpenGL context.
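A minimal sketch of that split (the method names come from this answer; the bodies just relocate the question's constructor and destructor code):

class Mesh
{
public:
    // Call once the OpenGL context exists (i.e. after the window is created).
    void init_buffers(Vertex* vertices, unsigned int numVertices)
    {
        m_drawCount = numVertices;
        glGenVertexArrays(1, &m_vertexArrayObject);
        glBindVertexArray(m_vertexArrayObject);
        glGenBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
        glBindBuffer(GL_ARRAY_BUFFER, m_vertexArrayBuffers[POSITION_VB]);
        glBufferData(GL_ARRAY_BUFFER, numVertices * sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
        glEnableVertexAttribArray(0);
        glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
        glBindVertexArray(0);
    }
    // Call while the context is still alive (e.g. right before the window closes).
    void free_buffers()
    {
        glDeleteVertexArrays(1, &m_vertexArrayObject);
        glDeleteBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
        m_vertexArrayObject = 0;
    }
    // draw() stays exactly as in the question; the destructor is gone.
private:
    enum { POSITION_VB, NUM_BUFFERS };
    GLuint m_vertexArrayObject = 0;
    GLuint m_vertexArrayBuffers[NUM_BUFFERS] = {};
    unsigned int m_drawCount = 0;
    char m_flag = 'L';
};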
I've implemented the missing code parts and tried your code with GLFW, in CLion.
It works. See code / CLion project here: OpenGLSandbox/main.cpp
The only code I've added is basically the following, so it's your turn to figure out the difference / error:
// Constants
const size_t NUM_BUFFERS = 1;
const size_t POSITION_VB = 0;
// Vertex class
class Vertex {
private:
glm::vec3 mCoords;
public:
Vertex(glm::vec3 coords) : mCoords(coords) {};
};
// Mesh class
class Mesh {
private:
GLuint m_vertexArrayObject;
char m_flag;
unsigned int m_drawCount;
GLuint m_vertexArrayBuffers[NUM_BUFFERS];
public:
/* your ctor and draw method */
};

Related

wxGLCanvas huge memory usage after resizing

I have created a simple wxGLCanvas for demonstrating OpenGL using wxWidgets. The demo works fine, except that when resizing the window the memory usage increases from a few megabytes to almost 400 megabytes, stays there, and never decreases. Here are the code snippets.
// ctor
TriangleCanvas::TriangleCanvas(wxWindow* parent, wxGLAttributes& attribList)
: wxGLCanvas(parent, attribList, wxID_ANY, { 0,0 }, wxDefaultSize),
m_vbo(0), m_vao(0), ctx_attr(new wxGLContextAttrs)
{
ctx_attr->CoreProfile().OGLVersion(4, 3).EndList();
m_context = new wxGLContext(this, NULL, ctx_attr);
Bind(wxEVT_PAINT, &TriangleCanvas::OnPaint, this);
Bind(wxEVT_SIZE, &TriangleCanvas::Resize, this);
}
// Paint method
void TriangleCanvas::OnPaint(wxPaintEvent& event)
{
wxPaintDC(this);
SetCurrent(*m_context);
shader->use();
// set background to black
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// draw the graphics
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_ebo);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glFlush();
SwapBuffers();
}
void TriangleCanvas::Resize(wxSizeEvent& event) {
event.Skip();
glViewport(0, 0, event.GetSize().x, event.GetSize().y);
if (!setup) {
InitializeGLEW();
SetupGraphics();
}
}
I think the best way to use wxGLCanvas with an extension loader is to use a helper class and keep all OpenGL drawing in the cpp portion of that helper class.
For example, here is a small helper class for drawing a triangle:
glhelper.h
#ifndef GLHELPER_H_INCLUDED
#define GLHELPER_H_INCLUDED
class GLHelper
{
public:
bool InitGlew();
void Render();
void SetSize(int w, int h);
bool InitData();
void Cleanup();
private:
unsigned int m_VBO, m_VAO, m_shaderProgram;
};
#endif // GLHELPER_H_INCLUDED
glhelper.cpp
#include <GL/glew.h>
#ifdef __WXMSW__
#include <GL/wglew.h>
#elif defined(__WXGTK__)
#include <GL/glxew.h>
#endif // defined
#include "glhelper.h"
static const char *vertexShaderSource = "#version 330 core\n"
"layout (location = 0) in vec3 aPos;\n"
"void main()\n"
"{\n"
" gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n"
"}\0";
static const char *fragmentShaderSource = "#version 330 core\n"
"out vec4 FragColor;\n"
"void main()\n"
"{\n"
" FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n"
"}\n\0";
bool GLHelper::InitGlew()
{
GLenum initStatus = glewInit();
return initStatus == GLEW_OK;
}
bool GLHelper::InitData()
{
unsigned int vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
glCompileShader(vertexShader);
// check for shader compile errors
int success;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
if (!success)
{
return false;
}
// fragment shader
unsigned int fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
glCompileShader(fragmentShader);
// check for shader compile errors
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
if (!success)
{
return false;
}
// link shaders
m_shaderProgram = glCreateProgram();
glAttachShader(m_shaderProgram, vertexShader);
glAttachShader(m_shaderProgram, fragmentShader);
glLinkProgram(m_shaderProgram);
// check for linking errors
glGetProgramiv(m_shaderProgram, GL_LINK_STATUS, &success);
if (!success) {
return false;
}
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
// set up vertex data (and buffer(s)) and configure vertex attributes
// ------------------------------------------------------------------
float vertices[] = {
-0.5f, -0.5f, 0.0f, // left
0.5f, -0.5f, 0.0f, // right
0.0f, 0.5f, 0.0f // top
};
//unsigned int VBO, VAO;
glGenVertexArrays(1, &m_VAO);
glGenBuffers(1, &m_VBO);
// bind the Vertex Array Object first, then bind and set vertex buffer(s),
// and then configure vertex attributes(s).
glBindVertexArray(m_VAO);
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
// note that this is allowed, the call to glVertexAttribPointer registered
//VBO as the vertex attribute's bound vertex buffer object so afterwards we
//can safely unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
// You can unbind the VAO afterwards so other VAO calls won't accidentally
//modify this VAO, but this rarely happens. Modifying other VAOs requires a
//call to glBindVertexArray anyways so we generally don't unbind VAOs (nor
// VBOs) when it's not directly necessary.
glBindVertexArray(0);
return true;
}
void GLHelper::Cleanup()
{
glDeleteVertexArrays(1, &m_VAO);
glDeleteBuffers(1, &m_VBO);
glDeleteProgram(m_shaderProgram);
}
void GLHelper::Render()
{
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// draw our first triangle
glUseProgram(m_shaderProgram);
glBindVertexArray(m_VAO);
// seeing as we only have a single VAO there's no need to bind it every
//time, but we'll do so to keep things a bit more organized
glDrawArrays(GL_TRIANGLES, 0, 3);
}
void GLHelper::SetSize(int width, int height)
{
glViewport(0, 0, width, height);
}
And here is a short demo that uses this helper class to draw the triangle.
demo.cpp
#include "wx/wx.h"
#include <wx/glcanvas.h>
#include "glhelper.h"
class wxGlewFrame: public wxFrame
{
public:
wxGlewFrame(wxWindow*);
~wxGlewFrame();
private:
void OnCanvasSize(wxSizeEvent&);
void OnCanvasPaint(wxPaintEvent&);
void InitGL();
wxGLCanvas* m_canvas;
wxGLContext* m_context;
GLHelper m_helper;
};
wxGlewFrame::wxGlewFrame(wxWindow* parent)
: wxFrame(parent, wxID_ANY, wxString())
{
// Create the canvas and context.
#if wxCHECK_VERSION(3,1,0)
// These settings should work with any GPU from the last 10 years.
wxGLAttributes dispAttrs;
dispAttrs.PlatformDefaults().RGBA().DoubleBuffer().EndList();
wxGLContextAttrs cxtAttrs;
cxtAttrs.PlatformDefaults().CoreProfile().OGLVersion(3, 3).EndList();
m_canvas = new wxGLCanvas(this, dispAttrs);
m_context = new wxGLContext(m_canvas, NULL, &cxtAttrs);
if ( !m_context->IsOK() )
{
SetTitle("Failed to create context.");
return;
}
#else
int dispAttrs[] = { WX_GL_RGBA, WX_GL_DOUBLEBUFFER, WX_GL_CORE_PROFILE,
WX_GL_MAJOR_VERSION ,3, WX_GL_MINOR_VERSION, 3, 0 };
m_canvas = new wxGLCanvas(this, wxID_ANY, dispAttrs);
m_context = new wxGLContext(m_canvas, NULL);
// Unfortunately, there doesn't seem to be any way to check if the
// context is ok prior to wxWidgets 3.1.0.
#endif // wxCHECK_VERSION
// On Linux, we must delay initialization until the canvas has been
// fully created. On Windows, we can finish now.
#ifdef __WXMSW__
InitGL();
#elif defined(__WXGTK__)
m_canvas->Bind(wxEVT_CREATE, [this](wxWindowCreateEvent&){InitGL();});
#endif // defined
}
wxGlewFrame::~wxGlewFrame()
{
m_helper.Cleanup();
delete m_context;
}
void wxGlewFrame::OnCanvasSize(wxSizeEvent& event)
{
wxSize sz = event.GetSize();
m_helper.SetSize(sz.GetWidth(), sz.GetHeight());
event.Skip();
}
void wxGlewFrame::OnCanvasPaint(wxPaintEvent&)
{
wxPaintDC dc(m_canvas);
m_helper.Render();
m_canvas->SwapBuffers();
}
void wxGlewFrame::InitGL()
{
// First call SetCurrent or GL initialization will fail.
m_context->SetCurrent(*m_canvas);
// Initialize GLEW.
bool glewInitialized = m_helper.InitGlew();
if ( !glewInitialized )
{
SetTitle("Failed to initialize GLEW.");
return;
}
SetTitle("Context and GLEW initialized.");
// Initialize the triangle data.
m_helper.InitData();
// Bind event handlers for the canvas. Binding was delayed until OpenGL was
// initialized because these handlers will need to call OpenGL functions.
m_canvas->Bind(wxEVT_SIZE, &wxGlewFrame::OnCanvasSize, this);
m_canvas->Bind(wxEVT_PAINT, &wxGlewFrame::OnCanvasPaint, this);
}
class MyApp : public wxApp
{
public:
virtual bool OnInit()
{
wxGlewFrame* frame = new wxGlewFrame(NULL);
frame->Show();
return true;
}
};
wxIMPLEMENT_APP(MyApp);
(This is basically the Hello Triangle sample from learnopengl.com, except rewritten to use wxGLCanvas and GLEW instead of GLFW and GLAD.)
This takes up about 19MB of memory on my system and only increases up to about 24 or 25MB when resizing. That might sound like a lot for such a simple program, but running the official "Hello Triangle" sample uses 26MB. So I think the memory usage is about what should be expected.

OpenGL: vertexArray vs glBegin()

I am following some tutorial on OpenGL and am running into a problem. I constructed a class called Mesh that takes an array of vertices in its constructor and generates vertex arrays and such to do the drawing. The problem is that I am not seeing anything. Here is the interface:
class Mesh
{
public:
Mesh(Vertex * vertices, size_t numVertices);
virtual ~Mesh();
void Draw();
private:
enum { POSITION_VB, NUM_BUFFERS };
GLuint m_vertexArrayObject;
GLuint m_vertexArrayBuffers;
size_t m_drawCount;
};
and here are the implementations
#include "mesh.h"
Mesh::Mesh(Vertex *vertices, size_t numVertices)
{
m_drawCount = numVertices;
glGenVertexArrays(1, &m_vertexArrayObject);
glBindVertexArray(m_vertexArrayObject);
glGenBuffers(NUM_BUFFERS, &m_vertexArrayBuffers);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexArrayBuffers);
glBufferData(GL_ARRAY_BUFFER, numVertices * sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
void Mesh::Draw()
{
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_TRIANGLES, 0, m_drawCount);
glBindVertexArray(0);
}
Mesh::~Mesh()
{
glDeleteVertexArrays(1, &m_vertexArrayObject);
}
The Vertex type is a simple class that looks as follows:
class Vertex {
public:
Vertex(glm::vec3 const & pos) { this->pos = pos;}
private:
glm::vec3 pos;
};
If I change the implementation of Mesh::Draw() to
glBegin(GL_TRIANGLES);
glVertex3f(-1.0f, -0.25f, 0.0f); //triangle first vertex
glVertex3f(-0.5f, -0.25f, 0.0f); //triangle second vertex
glVertex3f(-0.75f, 0.25f, 0.0f); //triangle third vertex
glEnd(); //end drawing of triangles
I am getting a triangle printed to the screen. My question is: does this necessarily mean that there is an error in the implementation of Mesh's member functions, and if so, can anyone spot it? I thought maybe the glBegin method bypasses some error somewhere else in the code that the vertex array method cannot bypass. I would be grateful for any help. Also, I can post additional code if needed!
The fragment shader code:
#version 120
void main()
{
gl_FragColor = vec4(1.0, 1.0, 0.0, 1.0);
}
More than likely your Mesh class's destructor is to blame:
Mesh::~Mesh()
{
glDeleteVertexArrays(1, &m_vertexArrayObject);
}
You have an implicit copy constructor, which does a byte-for-byte copy of your class's members whenever a copy of your Mesh is necessary. That byte-for-byte copy includes the name (m_vertexArrayObject) of an OpenGL-managed resource, which means you now have two distinct objects referencing the same resource.
As soon as one of these copies goes out of scope, it will delete the VAO that is still referenced by the original object.
The simplest way to solve this problem is to disable the copy constructor (C++11 has new syntax for this), then you will get a compiler error anytime a copy needs to be made.
private:
Mesh (const Mesh& original); // Copy ctor is inaccessible.
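In C++11 and later, the new syntax mentioned above spells this out with deleted functions, and any attempted copy fails with a dedicated diagnostic:

class Mesh {
public:
    Mesh(const Mesh&) = delete;            // copying a Mesh is a compile-time error
    Mesh& operator=(const Mesh&) = delete;
    // ... rest of the class as before ...
};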
If you really do want to support multiple Mesh objects sharing the same Vertex Array Object, you will need to add reference counting and only free the VAO when the reference count reaches 0.
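One way to get that reference counting without writing it yourself is to hold the VAO name in a std::shared_ptr with a custom deleter. This is a sketch of the idea, not code from the original answer:

#include <memory>
#include <GL/glew.h>

class Mesh {
public:
    Mesh() {
        GLuint id = 0;
        glGenVertexArrays(1, &id);
        // The deleter runs only when the last Mesh sharing this VAO is destroyed.
        m_vao = std::shared_ptr<GLuint>(new GLuint(id), [](GLuint* p) {
            glDeleteVertexArrays(1, p);
            delete p;
        });
    }
    // The implicitly generated copy operations now just bump the reference count.
private:
    std::shared_ptr<GLuint> m_vao;
};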

glBufferData with an array of vertices that have 3 floats (x, y and z) each

Currently I am trying to draw a triangle via a mesh class. I do this by first initializing GLEW, then calling the Window.initializeGraphics method of my Window class, then creating an array of vertices and passing it to my Mesh.addVerticies method. Each vertex has 3 floats: x, y, and z. Then I call the Mesh.draw method every tick of the main game loop.
initializeGraphics Method:
void Window::initializeGraphics()
{
glClearColor(0, 0, 0, 0);
glFrontFace(GL_CW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glEnable(GL_FRAMEBUFFER_SRGB);
}
Creating the Mesh:
m_Mesh = Mesh();
Vertex data[] = { Vertex(vec3(-1, -1, 0)),
Vertex(vec3(1, -1, 0)),
Vertex(vec3(0, 1, 0)) };
m_Mesh.addVerticies(data);
Mesh Header:
#include "vertex.h"
#include <GLEW\glew>
class Mesh
{
private:
GLuint m_Vbo;
int m_Size;
public:
Mesh();
void addVerticies(Vertex verticies[]);
void draw();
};
Mesh C++ File:
#include "mesh.h"
Mesh::Mesh()
{
glGenBuffers(1, &m_Vbo);
}
void Mesh::addVerticies(Vertex verticies[])
{
m_Size = (sizeof(verticies) / sizeof(*verticies));
glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(verticies), verticies, GL_STATIC_DRAW);
}
void Mesh::draw()
{
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, 0);
glDrawArrays(GL_TRIANGLES, 0, m_Size);
glDisableVertexAttribArray(0);
}
Vertex Header:
#include "vec3.h"
struct Vertex
{
union
{
vec3 pos;
struct
{
float x, y, z;
};
};
Vertex(vec3 pos_);
};
Vertex C++ File:
#Include "vertex.h"
Vertex::Vertex(vec3 pos_)
{
pos = pos_;
}
Render Method:
void MainComponent::render()
{
m_Window.clear();
m_Mesh.draw();
m_Window.update();
}
//m_Window.update();
void Window::update()
{
glfwSwapBuffers(m_GLFWWindow);
glfwPollEvents();
}
//m_Window.clear();
void Window::clear()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
Problem is, no triangle appears on screen. What am I doing wrong? I am pretty much still a C++ newbie, and this is my first time programming OpenGL, so it could be something really basic that I keep overlooking.
Thanks in advance for your efforts.
-Sean
Woah, hold on there! There are other bona fide errors here.
void Mesh::addVerticies(Vertex verticies[])
{
// This gives the wrong answer!
m_Size = (sizeof(verticies) / sizeof(*verticies));
That calculation for m_Size is completely wrong, because verticies is a pointer, not an array. It looks like an array, since it was declared as Vertex verticies[], but due to a quirk of C++ (the same applies to C) the parameter decays into a pointer. So it ends up being the same as this:
void Mesh::addVerticies(Vertex *verticies)
As you can see, sizeof(verticies) is always going to be 8 on a typical 64-bit system, because you are just getting the size of a pointer, not the size of an array.
We can fix this by capturing the size of the array with a template:
template <std::size_t N>
void addVertices(Vertex (&vertices)[N]) {
addVertices(vertices, N);
}
void addVertices(Vertex *vertices, std::size_t count) {
m_Size = count;
glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
glBufferData(
GL_ARRAY_BUFFER,
sizeof(*vertices) * count,
vertices,
GL_STATIC_DRAW);
}
Because we use a reference to the array, Vertex (&vertices)[N], instead of the array directly, Vertex vertices[N], the parameter does not decay into a pointer.
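Alternatively, if you can change the call sites, pass a container that carries its own size. A sketch with std::vector (keeping the question's spelling of the identifiers):

#include <vector>

void Mesh::addVerticies(const std::vector<Vertex>& verticies)
{
    m_Size = static_cast<int>(verticies.size());
    glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
    glBufferData(GL_ARRAY_BUFFER,
                 verticies.size() * sizeof(Vertex),
                 verticies.data(), GL_STATIC_DRAW);
}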
I feel like a complete moron... One vertex was off the screen. I think I'll call it a day :/

Vertex Buffer issues

I recently created this VertexBuffer class and added it to my OpenGL-based engine, but it seems to fail in my main code, causing an "access violation at 0x00000000".
Here is VertexBuffer.h :
#pragma once
#include "Type.h"
#include "Tags.h"
namespace Spiky {
namespace GLLayer {
template<>
struct GLObjDelAlloc<Tags::VertexBuffer> {
static void Allocate(GLTypeCT<GLType::Integer> count,
Tags::VertexBuffer::internal* names) {
glGenBuffers(count, names);
}
static void Delete(GLTypeCT<GLType::Integer> count, Tags::VertexBuffer::internal* names) {
glDeleteBuffers(count, names);
}
};
template<>
struct GLObjBind<Tags::VertexBuffer> {
private:
static Tags::VertexBuffer::internal _active;
public:
static const int ZERO_BUFFER = GL_NONE;
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
if(_active != name) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
_active = name;
}
}
};
Tags::VertexBuffer::internal
GLObjBind<Tags::VertexBuffer>::_active = 0;
//wrapper around GLObjDelAlloc & GLObjBind
class VertexBuffer {
public:
explicit VertexBuffer() {
GLObjDelAlloc<Tags::VertexBuffer>::Allocate(1, &handle_);
}
inline void Bind() {
GLObjBind<Tags::VertexBuffer>::BindRequest(handle_);
}
template<typename T>
static void BufferData(const GLTypeCT<GLType::Integer> count, GLTypeCT<T>* data) {
glBufferData(GL_ARRAY_BUFFER, count *
sizeof(data->stride), data, GL_STATIC_DRAW);
}
VertexBuffer& operator=(const VertexBuffer& other) = delete;
VertexBuffer(const VertexBuffer& other) = delete;
private:
Tags::VertexBuffer::internal handle_;
};
} //namespace GLLayer
} //namespace Spiky
And here is my main:
int main(int argc, char** args)
{
GLTypeCT<GLfloat> quadVertices[] = {
// Positions // Texture Coords
-1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f,
1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f,
};
// Setup plane VAO
glGenVertexArrays(1, &quadVAO);
VertexBuffer buffer{};
glBindVertexArray(quadVAO);
buffer.Bind();
VertexBuffer::BufferData(12, quadVertices);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat),
(GLvoid*)(0));
glBindVertexArray(0);
auto RenderQuad = [&] {
glBindVertexArray(quadVAO);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
};
//render calls later on...
}
Do you think the error comes from VertexBuffer?
I see a few possible issues here.
But first: 1280 means GL_INVALID_ENUM. Where did you get this? After which OpenGL call?
In this:
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
if(_active != name) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
_active = name;
}
You should probably pass glBindBuffer what you want to bind. glBindBuffer(GL_ARRAY_BUFFER, name)
I doubt this is causing the crash, but when you call glVertexAttribPointer you're passing it 3 * sizeof(float) when it should be 6 * sizeof(float). Why? Because the stride should be the size of your entire "Vertex", which in your case contains the position and the texture coordinates, which sums up to 6 floats.
And when you're calling glDrawArrays(GL_TRIANGLE_STRIP, 0, 4) you're saying that the count is 4, but as far as I can tell it's only 2.
The "access violation at 0x00000000", you get that if the function pointer to that function hasn't been retrieved, what are you using to get those pointers? I saw in the comments that you use SDL but that won't do any of that for you. You could do it yourself or use GLEW.

OpenGL Mesh Wrong Position

I am trying to create a simple triangle mesh, and after figuring out why I just got a blank screen (for some reason the x64 configuration was giving me problems) I am facing a new issue:
The positions of my vertices don't turn out how I want them, and I have no idea why.
What I should be getting is a triangle which looks about like this (screenshot omitted).
What I am getting though is this (screenshot omitted).
I use GLEW 1.10.0 for loading OpenGL, GLM 0.9.5.4 for OpenGL math stuff and SDL 2.0.3 for window stuff. Everything running on Windows 8.1 in Visual Studio 2013 Ultimate with the latest Nvidia graphics drivers.
Main.cpp:
#include "Display.h"
#include "Shader.h"
#include "Mesh.h"
using namespace std;
int main(int argc, char** argv)
{
Display display(1200, 800, "Hello World");
Vertex vertecies[] =
{
Vertex(vec3(-0.5, -0.5, 0)),
Vertex(vec3(0.5, -0.5, 0)),
Vertex(vec3(0, 0.5, 0))
};
Mesh mesh(vertecies, sizeof(vertecies) / sizeof(vertecies[0]));
Shader shader(".\\res\\BasicShader");
while (!display.IsClosed())
{
display.Clear(1.0f, 1.0f, 1.0f, 1.0f);
shader.Bind();
mesh.Draw();
display.Update();
}
return 0;
}
Mesh.cpp:
#include "Mesh.h"
Mesh::Mesh(Vertex* vertecies, unsigned int numVertecies)
{
m_drawCount = numVertecies;
glGenVertexArrays(1, &m_vertexArrayObject);
glBindVertexArray(m_vertexArrayObject);
glGenBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexArrayBuffers[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertecies[0]) * numVertecies, vertecies, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const void*)offsetof(Vertex, m_pos));
glBindVertexArray(0);
}
Mesh::~Mesh()
{
glDeleteBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
glDeleteVertexArrays(1, &m_vertexArrayObject);
}
void Mesh::Draw()
{
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_TRIANGLES, 0, m_drawCount);
glBindVertexArray(0);
}
Display.cpp:
#include "Display.h"
Display::Display(int width, int height, string title)
{
SDL_Init(SDL_INIT_EVERYTHING);
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 32);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
m_window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, width, height, SDL_WINDOW_OPENGL);
m_glContext = SDL_GL_CreateContext(m_window);
m_isClosed = false;
GLenum status = glewInit();
if (status != GLEW_OK)
cerr << "Could not initialize GLEW!" << endl;
}
Display::~Display()
{
SDL_GL_DeleteContext(m_glContext);
SDL_DestroyWindow(m_window);
SDL_Quit();
}
void Display::Clear(float r, float g, float b, float a)
{
glClearColor(r, g, b, a);
glClear(GL_COLOR_BUFFER_BIT);
}
bool Display::IsClosed()
{
return m_isClosed;
}
void Display::Update()
{
SDL_GL_SwapWindow(m_window);
SDL_Event e;
while (SDL_PollEvent(&e))
{
if (e.type == SDL_QUIT)
m_isClosed = true;
}
}
Vertex Shader:
#version 420 core
layout(location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position, 1.0);
}
Fragment Shader:
#version 420 core
out vec4 frag;
void main()
{
frag = vec4(1.0, 0.0, 0.0, 1.0);
}
Vertex.h:
#pragma once
#include <glm\glm.hpp>
using namespace glm;
struct Vertex
{
public:
Vertex(const vec3& pos);
virtual ~Vertex();
vec3 m_pos;
};
Vertex.cpp:
#include "Vertex.h"
Vertex::Vertex(const vec3& pos)
{
m_pos = pos;
}
Vertex::~Vertex()
{
}
EDIT: Everything has now been fixed.
This is probably a data layout issue where your Vertex class is being padded. Your Vertex has a virtual destructor, so it also carries a hidden vtable pointer; OpenGL would then interpret those extra bytes as valid vertex data.
You can verify this by printing the result of sizeof(Vertex): on the 64-bit platform you mention it will be larger than the 12 bytes of the glm::vec3 it contains (typically 24, i.e. an 8-byte vtable pointer plus the vec3, padded for alignment).
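A quick diagnostic along those lines (a sketch; it assumes the question's Vertex.h is on the include path):

#include <cstdio>
#include <glm/glm.hpp>
#include "Vertex.h"

int main()
{
    // With the virtual destructor, expect 24 on x64: 8-byte vptr + 12-byte vec3, padded.
    std::printf("sizeof(Vertex)    = %zu\n", sizeof(Vertex));
    std::printf("sizeof(glm::vec3) = %zu\n", sizeof(glm::vec3)); // 12
    return 0;
}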
A stride of 0, as below, tells OpenGL that the floats are tightly packed in memory, without padding, which is not true for such a Vertex:
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
A better way of setting the vertex pointer would be:
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (const void*)offsetof(Vertex, m_pos));
This also enables you to easily add more attributes.
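For example, once the virtual destructor is removed so that Vertex is standard-layout, adding a hypothetical texture-coordinate member m_texCoord only takes one more attribute set up with the same pattern:

glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex),
                      (const void*)offsetof(Vertex, m_texCoord));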