Vertex Buffer issues - C++

I recently created this VertexBuffer class and added it to my OpenGL-based engine, but it seems to fail in my main code, causing an "access violation at 0x00000000".
Here is VertexBuffer.h:
#pragma once
#include "Type.h"
#include "Tags.h"
namespace Spiky {
namespace GLLayer {
template<>
struct GLObjDelAlloc<Tags::VertexBuffer> {
static void Allocate(GLTypeCT<GLType::Integer> count,
Tags::VertexBuffer::internal* names) {
glGenBuffers(count, names);
}
static void Delete(GLTypeCT<GLType::Integer> count, Tags::VertexBuffer::internal* names) {
glDeleteBuffers(count, names);
}
};
template<>
struct GLObjBind<Tags::VertexBuffer> {
private:
static Tags::VertexBuffer::internal _active;
public:
static const int ZERO_BUFFER = GL_NONE;
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
if(_active != name) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
_active = name;
}
}
};
Tags::VertexBuffer::internal
GLObjBind<Tags::VertexBuffer>::_active = 0;
//wrapper around GLObjDelAlloc & GLObjBind
class VertexBuffer {
public:
explicit VertexBuffer() {
GLObjDelAlloc<Tags::VertexBuffer>::Allocate(1, &handle_);
}
inline void Bind() {
GLObjBind<Tags::VertexBuffer>::BindRequest(handle_);
}
template<typename T>
static void BufferData(const GLTypeCT<GLType::Integer> count, GLTypeCT<T>* data) {
glBufferData(GL_ARRAY_BUFFER, count *
sizeof(data->stride), data, GL_STATIC_DRAW);
}
VertexBuffer& operator=(const VertexBuffer& other) = delete;
VertexBuffer(const VertexBuffer& other) = delete;
private:
Tags::VertexBuffer::internal handle_;
};
} //namespace GLLayer
} //namespace Spiky
And here is my main:
int main(int argc, char** args)
{
GLTypeCT<GLfloat> quadVertices[] = {
// Positions // Texture Coords
-1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f,
1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f,
};
// Setup plane VAO
glGenVertexArrays(1, &quadVAO);
VertexBuffer buffer{};
glBindVertexArray(quadVAO);
buffer.Bind();
VertexBuffer::BufferData(12, quadVertices);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat),
(GLvoid*)(0));
glBindVertexArray(0);
auto RenderQuad = [&] {
glBindVertexArray(quadVAO);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
};
//render calls later on...
}
Do you think the error comes from VertexBuffer?

I see a few possible issues here.
But first: 1280 means GL_INVALID_ENUM. Where did you get that value? After which OpenGL call?
In this:
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
if(_active != name) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
_active = name;
}
You should probably pass glBindBuffer the buffer you actually want to bind: glBindBuffer(GL_ARRAY_BUFFER, name).
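A corrected sketch of BindRequest (a minimal fix, assuming the Tags/GLObjBind machinery from your header):
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
    if (_active != name) {
        glBindBuffer(GL_ARRAY_BUFFER, name); // bind the requested buffer, not 0
        _active = name;
    }
}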
I doubt this is what's causing the crash, but when you call glVertexAttribPointer you're passing a stride of 3 * sizeof(float) when it should be 6 * sizeof(float). Why? Because the stride should be the size of your entire "Vertex", which in your case contains the position and the texture coordinates, and that sums to 6 floats.
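For illustration, a sketch of the interleaved layout implied above (assuming 3 position floats followed by 3 texture-coordinate floats per vertex; the stride and offsets are the point here):
const GLsizei stride = 6 * sizeof(GLfloat);
glEnableVertexAttribArray(0); // position
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, (GLvoid*)0);
glEnableVertexAttribArray(1); // texture coords
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, (GLvoid*)(3 * sizeof(GLfloat)));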
And when you're calling glDrawArrays(GL_TRIANGLE_STRIP, 0, 4) you're saying that the count is 4, but as far as I can tell it's only 2.
The "access violation at 0x00000000", you get that if the function pointer to that function hasn't been retrieved, what are you using to get those pointers? I saw in the comments that you use SDL but that won't do any of that for you. You could do it yourself or use GLEW.


C++ Strange Access Violation with OpenGL [closed]

I'm pretty new to C++ so I hope I can get some help here.
I'm trying to port my game engine to C++, but C++ behaves a little bit... "strange".
Here is the situation:
If I run test1(), it all works as it should.
main.cpp
#include <iostream>
#include "../headers/base.h"
#include "../headers/DemoGame.h"
#include "../headers/TestShader.h"
using namespace std;
using namespace engine;
void run(TestShader* t, GLuint VAO, GLFWwindow* w)
{
glfwPollEvents();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(t->progID);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glfwSwapBuffers(w);
}
void test1()
{
Window w = Window(800, 600, "test");
TestShader t = TestShader();
GLuint VAO, VBO;
GLfloat vertices[9] = {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
while (!glfwWindowShouldClose(w.getWindow()))
{
run(&t, VAO, w.getWindow());
}
}
void test2()
{
DemoGame game = DemoGame();
game.start();
}
int main()
{
test1();
return 0;
}
If I run test2() with the following classes involved:
Engine.h
#pragma once
#ifndef H_ENGINE
#define H_ENGINE
#include "base.h"
namespace engine
{
class Engine
{
private:
bool running;
public:
void start()
{
init();
process();
}
void stop()
{
this->running = false;
}
private:
void process()
{
update();
}
public:
virtual void init() = 0;
virtual void update() = 0;
virtual void render() = 0;
virtual void terminate() = 0;
};
}
#endif
DemoGame.h
#pragma once
#ifndef DEMO_DEMO_GAME
#define DEMO_DEMO_GAME
#include "base.h"
#include "Window.h"
#include "Engine.h"
#include "TestShader.h"
using namespace engine;
class DemoGame : public Engine
{
public:
Window* w;
TestShader* t;
GLuint VBO, VAO;
public:
DemoGame() : Engine() { }
public:
void init();
void update();
void render();
void terminate();
};
#endif
DemoGame.cpp
#include "../headers/DemoGame.h"
#include <iostream>
using namespace std;
void DemoGame::init()
{
cout << "ping" << endl;
Window wi = Window(800, 600, "test");
w = &wi;
TestShader te = TestShader();
t = &te;
GLfloat vertices[9] = {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
while (!glfwWindowShouldClose(w->getWindow()))
{
render();
}
}
void DemoGame::update()
{
}
void DemoGame::render()
{
glfwPollEvents();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(t->progID);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glfwSwapBuffers(w->getWindow());
}
void DemoGame::terminate()
{
}
It works as well. But as you may see, Engine.h is supposed to control the main loop. If I change the code a little bit:
Engine.h
#pragma once
#ifndef H_ENGINE
#define H_ENGINE
#include "base.h"
namespace engine
{
class Engine
{
private:
bool running;
public:
void start()
{
init();
running = true;
while (running)
{
process();
}
}
void stop()
{
this->running = false;
}
private:
void process()
{
update();
}
public:
virtual void init() = 0;
virtual void update() = 0;
virtual void render() = 0;
virtual void terminate() = 0;
};
}
#endif
DemoGame.cpp
#include "../headers/DemoGame.h"
#include <iostream>
using namespace std;
void DemoGame::init()
{
cout << "ping" << endl;
Window wi = Window(800, 600, "test");
w = &wi;
TestShader te = TestShader();
t = &te;
GLfloat vertices[9] = {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void DemoGame::update()
{
render();
}
void DemoGame::render()
{
glfwPollEvents();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(t->progID);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glfwSwapBuffers(w->getWindow());
}
void DemoGame::terminate()
{
}
Now all of a sudden I get an "Access Violation". The question is why?
The file "base.h" just contains
#define GLEW_STATIC
#include "GL/glew.h"
#include "GLFW/glfw3.h"
and the classes Window and TestShader shouldn't matter because they work in the first two examples. As I stated before, I'm pretty new to C++ and I just don't understand why this doesn't work. Can you please help me find out at least why it doesn't work, or better, help me solve the problem?
This is my second attempt to get a useful answer from Stack Overflow by posting a question. Please do me a favour and read the situation before you mark this question as a duplicate. The last time it wasn't a duplicate; the problem was quite different.
Edit
As requested, the error message (sorry, I'm at work, so it was in German; translated):
Exception thrown at 0x0126489D in GLFWGame.exe: 0xC0000005:
Access violation reading location 0xCCCCCEA4.
If a handler for this exception exists, the program may
be safely continued.
And I'll try to shorten the code to the most important parts.
You store addresses of stack objects that get deleted. For example,
Window wi = Window(800, 600, "test");
w = &wi;
Creates a local variable wi on the stack, which gets destroyed automatically when it goes out of scope (at the end of the function). After that, w points to an address that has already been freed, which leads to big trouble when you try to access these variables later on, as you do here:
glfwSwapBuffers(w->getWindow());
If you want to create the window object on the heap, you have to use the following code in DemoGame::init():
w = new Window(800, 600, "test");
Don't forget to delete this object manually by calling delete w when you don't need it anymore. The same problem also occurs for the TestShader instance.
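If you'd rather not manage delete manually, a sketch with std::unique_ptr (assuming C++14 for std::make_unique; the member declarations in DemoGame.h change accordingly):
// DemoGame.h
#include <memory>
std::unique_ptr<Window> w;
std::unique_ptr<TestShader> t;

// DemoGame::init()
w = std::make_unique<Window>(800, 600, "test");
t = std::make_unique<TestShader>();
// no matching delete needed; both are destroyed with the DemoGame instance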
Side note: Window wi = Window(800, 600, "test"); is still strange syntax for creating objects on the stack. The idiomatic way would be Window wi(800, 600, "test");. Have a look at this post for why it makes a difference: Calling constructors in c++ without new
Edit: Your first example only works because you call the render function inside the init function, so the objects have not yet gone out of scope. Storing pointers to local objects is still bad practice.
Your problem is here:
Window wi = Window(800, 600, "test");
w = &wi;
TestShader te = TestShader();
t = &te;
Both the Window instance and the TestShader instance are local variables that get cleaned up as soon as they go out of scope (at the end of init), so remembering their addresses is meaningless. You will need to create those instances dynamically (new) or store them by value in your class definition.

std::vector and VBOs render only the last shape [duplicate]

(Marked as a duplicate of: Mesh class called with default constructor not working OpenGL C++)
I'm running into a weird problem. Basically I have a Mesh class that, depending on a flag, can draw a point, a line, or a triangle. For example, if I want to draw two lines, I can do the following
Vertex vertices1[] = {
Vertex(glm::vec3(-.5, -.5, 0)),
Vertex(glm::vec3( 0, .5, 0))
};
Vertex vertices2[] = {
Vertex(glm::vec3( .5, -.5, 0)),
Vertex(glm::vec3( -.5, .5, 0))
};
Mesh mesh1(vertices1, sizeof(vertices1)/sizeof(vertices1[0]), 'L');
Mesh mesh2(vertices2, sizeof(vertices2)/sizeof(vertices2[0]), 'L');
// Rendering Loop:
while( Window.isOpen() ){
...
//================( Rendering )=========================
ourShader.Use();
mesh1.draw();
mesh2.draw();
//======================================================
...
}
The result is both lines drawn, as expected (screenshot omitted).
Now I would like to use std::vector<Mesh> and loop through meshes. My attempt is as follows
std::vector<Mesh> meshes;
meshes.push_back(mesh1);
meshes.push_back(mesh2);
while( Window.isOpen() ){
...
//================( Rendering )=========================
ourShader.Use();
for ( int i(0); i < meshes.size(); ++i )
meshes[i].draw();
//======================================================
...
}
With the preceding approach, only the last line is drawn (screenshot omitted).
Moreover, once I use .push_back(), even if I don't loop through the vector, only the last line is drawn. I don't understand why using std::vector breaks the rendering. I even tried meshes[0].draw(), but with no luck. Any suggestions?
Edit:
This is the Mesh class and its constructor
#include <iostream>
#include <vector>
#include <glm/glm.hpp>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include "display.h"
#include "keyboard.h"
#include "shader.h"
class Vertex
{
public:
Vertex(const glm::vec3& p) : m_position(p)
{}
private:
glm::vec3 m_position;
};
class Mesh
{
public:
Mesh(Vertex* vertices, unsigned int numVertices, const char& flag);
~Mesh();
void draw();
private:
enum{
POSITION_VB,
NUM_BUFFERS
};
GLuint m_vertexArrayObject;
GLuint m_vertexArrayBuffers[NUM_BUFFERS];
unsigned int m_drawCount;
char m_flag;
};
Mesh::Mesh(Vertex* vertices, unsigned int numVertices, const char& flag) : m_flag(flag), m_drawCount(numVertices)
{
glGenVertexArrays(1, &m_vertexArrayObject);
glBindVertexArray(m_vertexArrayObject);
glGenBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexArrayBuffers[POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, numVertices*sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
Mesh::~Mesh()
{
glDeleteVertexArrays(1, &m_vertexArrayObject);
glDeleteBuffers(1, m_vertexArrayBuffers);
}
void Mesh::draw()
{
switch(m_flag)
{
case 'P':
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_POINTS, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'L':
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_LINES, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'T':
glBindVertexArray(m_vertexArrayObject);
glDrawArrays(GL_TRIANGLES, 0, m_drawCount);
glBindVertexArray(0);
break;
}
}
int main(void)
{
Display Window(800, 600, "OpenGL Window");
Keyboard myKeyboard( Window.getWindowPointer() );
Vertex vertices1[] = {
Vertex(glm::vec3(-.5, -.5, 0)),
Vertex(glm::vec3( 0, .5, 0))
};
Vertex vertices2[] = {
Vertex(glm::vec3( .5, -.5, 0)),
Vertex(glm::vec3( -.5, .5, 0))
};
Mesh mesh1(vertices1, sizeof(vertices1)/sizeof(vertices1[0]), 'L');
Mesh mesh2(vertices2, sizeof(vertices2)/sizeof(vertices2[0]), 'L');
std::vector<Mesh> meshes;
meshes.emplace_back(mesh1);
meshes.emplace_back(mesh2);
std::cout << meshes.size() << std::endl;
//*****************( SHADER )************************
Shader ourShader("shader.vs", "shader.frag");
glEnable(GL_PROGRAM_POINT_SIZE);
while( Window.isOpen() ){
Window.PollEvents();
Window.clear();
//================( Rendering )=========================
ourShader.Use();
//mesh1.draw();
//mesh2.draw();
for ( int i(0); i < meshes.size(); ++i )
meshes[i].draw();
//meshes[0].draw();
//meshes[1].draw();
//======================================================
Window.SwapBuffers();
}
glfwTerminate();
return 0;
}
Shaders
#version 330 core
out vec4 color;
void main()
{
color = vec4(1.0f,0.5f,0.2f,1.0f);
}
#version 330 core
layout (location = 0) in vec3 position;
void main()
{
gl_PointSize = 10.0;
gl_Position = vec4(position, 1.0);
}
As I suspected, the problem is the (lack of a) copy constructor. The default one just copies all the members. As a result, your VAOs and buffers get deleted multiple times, even before you manage to draw anything (vectors move their elements during reallocation, and if they can't move them they copy). As a rule of thumb: if you have a non-default destructor, you must also implement a copy constructor and an assignment operator, or explicitly delete them if your class is not meant to be copyable.
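A minimal sketch of that rule applied to your Mesh (keeping your destructor; the move constructor hands over the GL handles so the moved-from object deletes nothing, since glDelete* ignores the name 0):
class Mesh
{
public:
    // ...existing constructor, destructor and draw() as in the question...
    Mesh(const Mesh&) = delete;            // copying would double-delete GL objects
    Mesh& operator=(const Mesh&) = delete;
    Mesh(Mesh&& other) noexcept
        : m_vertexArrayObject(other.m_vertexArrayObject),
          m_drawCount(other.m_drawCount),
          m_flag(other.m_flag)
    {
        for (int i = 0; i < NUM_BUFFERS; ++i) {
            m_vertexArrayBuffers[i] = other.m_vertexArrayBuffers[i];
            other.m_vertexArrayBuffers[i] = 0;
        }
        other.m_vertexArrayObject = 0;
    }
    // a move assignment operator would follow the same pattern
};
With this in place, meshes.emplace_back(std::move(mesh1)) moves instead of copying.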
For your concrete case the solutions are:
Quick solution: store pointers to meshes in the vector:
std::vector<Mesh*> meshes;
meshes.emplace_back(&mesh1);
meshes.emplace_back(&mesh2);
Correct solution: use proper RAII for resource management. Using the unique_ptr technique from here, your MCVE code becomes:
class Mesh
{
public:
Mesh(Vertex* vertices, unsigned int numVertices, const char& flag);
void draw();
private:
//...
GLvertexarray m_vertexArrayObject;
GLbuffer m_vertexArrayBuffers[NUM_BUFFERS];
unsigned int m_drawCount;
char m_flag;
};
Mesh::Mesh(Vertex* vertices, unsigned int numVertices, const char& flag) : m_flag(flag), m_drawCount(numVertices)
{
GLuint id;
glGenVertexArrays(1, &id);
glBindVertexArray(id);
m_vertexArrayObject.reset(id);
for(int i = 0; i < NUM_BUFFERS; ++i)
{
glGenBuffers(1, &id);
glBindBuffer(GL_ARRAY_BUFFER, id);
m_vertexArrayBuffers[i].reset(id);
glBufferData(GL_ARRAY_BUFFER, numVertices*sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
}
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
void Mesh::draw()
{
switch(m_flag)
{
case 'P':
glBindVertexArray(m_vertexArrayObject.get());
glDrawArrays(GL_POINTS, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'L':
glBindVertexArray(m_vertexArrayObject.get());
glDrawArrays(GL_LINES, 0, m_drawCount);
glBindVertexArray(0);
break;
case 'T':
glBindVertexArray(m_vertexArrayObject.get());
glDrawArrays(GL_TRIANGLES, 0, m_drawCount);
glBindVertexArray(0);
break;
}
}
int main()
{
//...
Mesh mesh1(vertices1, sizeof(vertices1)/sizeof(vertices1[0]), 'L');
Mesh mesh2(vertices2, sizeof(vertices2)/sizeof(vertices2[0]), 'L');
std::vector<Mesh> meshes;
meshes.emplace_back(std::move(mesh1));
meshes.emplace_back(std::move(mesh2));
// ...
return 0;
}
Notice how there is no longer any need to define a destructor, and your class automatically becomes movable but not copyable. Furthermore, if you have OpenGL 4.5 or ARB_direct_state_access, things get even simpler.
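For completeness, a sketch of the GL 4.5 / direct-state-access version of the buffer setup (assuming the same Vertex layout with a single vec3 position at offset 0, and the numVertices/vertices parameters from the constructor):
GLuint vao, vbo;
glCreateVertexArrays(1, &vao); // created ready to use, no bind needed
glCreateBuffers(1, &vbo);
glNamedBufferData(vbo, numVertices * sizeof(Vertex), vertices, GL_STATIC_DRAW);
glVertexArrayVertexBuffer(vao, 0, vbo, 0, sizeof(Vertex)); // binding index 0
glEnableVertexArrayAttrib(vao, 0);
glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(vao, 0, 0);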
EDIT
The main problem is that the destructor is called when you add the Mesh objects to the vector (push_back copies the mesh and destroys the temporary), so the underlying GL objects get deleted.
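You can watch that happen with a tiny probe class (a standalone sketch, not from the original code):
#include <cstdio>
#include <vector>

struct Probe {
    Probe() { std::puts("construct"); }
    Probe(const Probe&) { std::puts("copy"); }
    ~Probe() { std::puts("destroy"); } // imagine glDeleteVertexArrays here
};

int main() {
    std::vector<Probe> v;
    v.push_back(Probe{}); // the temporary is copied in, then destroyed
    v.push_back(Probe{}); // reallocation copies and destroys the first element too
}
If ~Probe freed a GL object, every one of those "destroy" lines would free it, which is exactly what happens to the Mesh VAOs.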
Further reading: Why does my class's destructor get called when I add instances to a vector? | What is The Rule of Three?
I'd personally create separate init_buffers and free_buffers methods in my Mesh class and use them appropriately. (Initialize the buffers after the OpenGL context is obtained; free them when the window is closed.)
This way you can start building meshes (and add them to the scene) before actually having the OpenGL context.
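A sketch of that split (assuming the vertex data is kept in the Mesh until the context exists; details beyond the GL calls are elided):
void Mesh::init_buffers() // call once the GL context is current
{
    glGenVertexArrays(1, &m_vertexArrayObject);
    glBindVertexArray(m_vertexArrayObject);
    glGenBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
    // ...glBindBuffer / glBufferData / glVertexAttribPointer as in the constructor...
    glBindVertexArray(0);
}

void Mesh::free_buffers() // call before the context is destroyed
{
    glDeleteVertexArrays(1, &m_vertexArrayObject);
    glDeleteBuffers(NUM_BUFFERS, m_vertexArrayBuffers);
}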
I've implemented the missing code parts and tried your code with GLFW in CLion.
It works. See the code / CLion project here: OpenGLSandbox/main.cpp
The only code I've added is basically the following, so it's your turn to figure out the difference / error.
// Constants
const size_t NUM_BUFFERS = 1;
const size_t POSITION_VB = 0;
// Vertex class
class Vertex {
private:
glm::vec3 mCoords;
public:
Vertex(glm::vec3 coords) : mCoords(coords) {};
};
// Mesh class
class Mesh {
private:
GLuint m_vertexArrayObject;
char m_flag;
unsigned int m_drawCount;
GLuint m_vertexArrayBuffers[NUM_BUFFERS];
public:
/* your ctor and draw method */
};

Program crash when calling OpenGL functions

I'm trying to set up a game engine project. My Visual Studio solution is set up so that I have an 'engine' project separate from my 'game' project. The engine project is compiled to a DLL for the game project to use. I've already downloaded and set up GLFW and GLEW to start using OpenGL. My problem is that whenever I hit my first OpenGL function, the program crashes. I know this has something to do with glewInit, even though GLEW IS initializing successfully (no console errors). In my engine project, I have a Window class where, upon window construction, GLEW should be set up:
Window.h
#pragma once
#include "GL\glew.h"
#include "GLFW\glfw3.h"
#if (_DEBUG)
#define LOG(x) printf(x)
#else
#define LOG(x)
#endif
namespace BlazeGraphics
{
class __declspec(dllexport) Window
{
public:
Window(short width, short height, const char* title);
~Window();
void Update();
void Clear() const;
bool Closed() const;
private:
int m_height;
int m_width;
const char* m_title;
GLFWwindow* m_window;
private:
Window(const Window& copy) {}
void operator=(const Window& copy) {}
};
}
Window.cpp (where glewinit() is called)
#include "Window.h"
#include <cstdio>
namespace BlazeGraphics
{
//Needed to define outside of the window class (not sure exactly why yet)
void WindowResize(GLFWwindow* window, int width, int height);
Window::Window(short width, short height, const char* title) :
m_width(width),
m_height(height),
m_title(title)
{
//InitializeWindow
{
if (!glfwInit())
{
LOG("Failed to initialize glfw!");
return;
};
m_window = glfwCreateWindow(m_width, m_height, m_title, NULL, NULL);
if (!m_window)
{
LOG("Failed to initialize glfw window!");
glfwTerminate();
return;
};
glfwMakeContextCurrent(m_window);
glfwSetWindowSizeCallback(m_window, WindowResize);
}
//IntializeGl
{
//This needs to be after two functions above (makecontextcurrent and setwindowresizecallback) or else glew will not initialize
if (glewInit() != GLEW_OK)
{
LOG("Failed to initialize glew!");
}
}
}
Window::~Window()
{
glfwTerminate();
}
void Window::Update()
{
glfwPollEvents();
glfwSwapBuffers(m_window);
}
void Window::Clear() const
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
//Returns a bool because glfwWindowShouldClose returns a nonzero number or zero
bool Window::Closed() const
{
//Made it equal to 1 to take away warning involving converting an int to bool
return glfwWindowShouldClose(m_window) == 1;
}
//Not part of window class so defined above
void WindowResize(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
}
Here is my main.cpp file which is found within my game project where I currently have my openGL functionality in global functions (just for now):
main.cpp
#include <iostream>
#include <array>
#include <fstream>
#include "GL\glew.h"
#include "GLFW\glfw3.h"
#include "../Engine/Source/Graphics/Window.h"
void initializeGLBuffers()
{
GLfloat triangle[] =
{
+0.0f, +0.1f, -0.0f,
0.0f, 1.0f, 0.0f,
-0.1f, -0.1f, 0.0f, //1
0.0f, 1.0f, 0.0f,
+0.1f, -0.1f, 0.0f, //2
0.0f, 1.0f, 0.0f,
};
GLuint bufferID;
glGenBuffers(1, &bufferID);
glBindBuffer(GL_ARRAY_BUFFER, bufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(triangle), triangle, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, (sizeof(GLfloat)) * 6, nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, (sizeof(GLfloat)) * 6, (char*)((sizeof(GLfloat)) * 3));
GLushort indices[] =
{
0,1,2
};
GLuint indexBufferID;
glGenBuffers(1, &indexBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
};
void installShaders()
{
//Create Shader
GLuint vertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
//Add source or text file to shader object
std::string temp = readShaderCode("VertexShaderCode.glsl");
const GLchar* adapter[1];
adapter[0] = temp.c_str();
glShaderSource(vertexShaderID, 1, adapter, 0);
temp = readShaderCode("FragmentShaderCode.glsl").c_str();
adapter[0] = temp.c_str();
glShaderSource(FragmentShaderID, 1, adapter, 0);
//Compile Shadaer
glCompileShader(vertexShaderID);
glCompileShader(FragmentShaderID);
if (!checkShaderStatus(vertexShaderID) || !checkShaderStatus(FragmentShaderID))
return;
//Create Program
GLuint programID = glCreateProgram();
glAttachShader(programID, vertexShaderID);
glAttachShader(programID, FragmentShaderID);
//Link Program
glLinkProgram(programID);
if (!checkProgramStatus(programID))
{
std::cout << "Failed to link program";
return;
}
//Use program
glUseProgram(programID);
}
int main()
{
BlazeGraphics::Window window(1280, 720, "MyGame");
initializeGLBuffers();
installShaders();
while (!window.Closed())
{
window.Clear();
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, 0);
window.Update();
};
return 0;
}
Now if I move the glewInit() call here into my main.cpp:
int main()
{
BlazeGraphics::Window window(1280, 720, "MyGame");
if (glewInit() != GLEW_OK)
{
LOG("Failed to initialize glew!");
}
initializeGLBuffers();
installShaders();
while (!window.Closed())
{
window.Clear();
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, 0);
window.Update();
};
return 0;
}
then my program runs fine. Why does trying to initialize GLEW within engine.dll cause a program crash? Thanks for any help.
GLEW works by defining a function pointer as a global variable for each OpenGL function. Let's look at glBindBuffer as an example:
#define GLEW_FUN_EXPORT GLEWAPI
typedef void (GLAPIENTRY * PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
GLEW_FUN_EXPORT PFNGLBINDBUFFERPROC __glewBindBuffer;
So we just have a __glewBindBuffer function pointer, which will be set to the correct address from your OpenGL implementation by glewInit.
To actually be able to write glBindBuffer, GLEW simply defines pre-processor macros mapping the GL functions to those function pointer variables:
#define glBindBuffer GLEW_GET_FUN(__glewBindBuffer)
Why does trying to initialize glew within engine.dll cause a program crash?
Because your engine.dll and your main application each have a separate set of all of these global variables. You would have to export all the __glew* variables from your engine DLL for the application to see the results of the glewInit call made in engine.dll.
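Two common ways out (sketches based on how GLEW's globals behave, not something from the original post): link GLEW as a shared library (glew32.dll) so both modules share one set of pointers, or call glewInit() once per module after the context is current, so each module fills in its own copies:
// engine.dll, Window constructor (as you already do):
glfwMakeContextCurrent(m_window);
if (glewInit() != GLEW_OK) { LOG("Failed to initialize glew!"); }

// game exe, before its first GL call (fills the exe's own __glew* pointers):
if (glewInit() != GLEW_OK) { /* handle the error */ }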

glBufferData with an array of vertices that have 3 floats (x, y and z) each

Currently I am trying to draw a triangle via a mesh class. I do this by first initializing GLEW, then calling the Window.initializeGraphics method of my Window class, then creating an array of vertices and passing it to my Mesh.addVerticies method. Each vertex has 3 floats: x, y, and z. Then I call the Mesh.draw method every tick of the main game loop.
initializeGraphics Method:
void Window::initializeGraphics()
{
glClearColor(0, 0, 0, 0);
glFrontFace(GL_CW);
glCullFace(GL_BACK);
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST);
glEnable(GL_FRAMEBUFFER_SRGB);
}
Creating the Mesh:
m_Mesh = Mesh();
Vertex data[] = { Vertex(vec3(-1, -1, 0)),
Vertex(vec3(1, -1, 0)),
Vertex(vec3(0, 1, 0)) };
m_Mesh.addVerticies(data);
Mesh Header:
#include "vertex.h"
#include <GL\glew.h>
class Mesh
{
private:
GLuint m_Vbo;
int m_Size;
public:
Mesh();
void addVerticies(Vertex verticies[]);
void draw();
};
Mesh C++ File:
#include "mesh.h"
Mesh::Mesh()
{
glGenBuffers(1, &m_Vbo);
}
void Mesh::addVerticies(Vertex verticies[])
{
m_Size = (sizeof(verticies) / sizeof(*verticies));
glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(verticies), verticies, GL_STATIC_DRAW);
}
void Mesh::draw()
{
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * 4, 0);
glDrawArrays(GL_TRIANGLES, 0, m_Size);
glDisableVertexAttribArray(0);
}
Vertex Header:
#include "vec3.h"
struct Vertex
{
union
{
vec3 pos;
struct
{
float x, y, z;
};
};
Vertex(vec3 pos_);
};
Vertex C++ File:
#Include "vertex.h"
Vertex::Vertex(vec3 pos_)
{
pos = pos_;
}
Render Method:
void MainComponent::render()
{
m_Window.clear();
m_Mesh.draw();
m_Window.update();
}
//m_Window.update();
void Window::update()
{
glfwSwapBuffers(m_GLFWWindow);
glfwPollEvents();
}
//m_Window.clear();
void Window::clear()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
The problem is, no triangle appears on screen. What am I doing wrong? I am pretty much still a C++ newbie, and this is my first time programming OpenGL, so it could be something really basic that I keep overlooking.
Thanks in advance for your efforts.
-Sean
Woah, hold on there! There are other bona fide errors here.
void Mesh::addVerticies(Vertex verticies[])
{
// This gives the wrong answer!
m_Size = (sizeof(verticies) / sizeof(*verticies));
That calculation for m_Size is completely wrong, because verticies is a pointer, not an array. It looks like an array, since it was declared as Vertex verticies[], but due to a quirk of C++ (the same applies to C) the parameter decays into a pointer. So it ends up being the same as this:
void Mesh::addVerticies(Vertex *verticies)
As you can see, sizeof(verticies) is always going to be 8 on a typical 64-bit system, because you are just getting the size of a pointer, not the size of an array.
We can fix this by capturing the size of the array with a template:
template <std::size_t N>
void addVertices(Vertex (&vertices)[N]) {
addVertices(vertices, N);
}
void addVertices(Vertex *vertices, std::size_t count) {
m_Size = count;
glBindBuffer(GL_ARRAY_BUFFER, m_Vbo);
glBufferData(
GL_ARRAY_BUFFER,
sizeof(*vertices) * count,
vertices,
GL_STATIC_DRAW);
}
Because we use a reference to the array, Vertex (&vertices)[N], instead of the array directly, Vertex vertices[N], the parameter does not decay into a pointer.
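The call site stays the same; the array size is deduced (a sketch reusing the question's setup, with the method renamed to addVertices as above):
Vertex data[] = { Vertex(vec3(-1, -1, 0)),
                  Vertex(vec3( 1, -1, 0)),
                  Vertex(vec3( 0,  1, 0)) };
m_Mesh.addVertices(data); // the template overload deduces N = 3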
I feel like a complete moron... One vertex was off the screen. I think I'll call it a day :/

OpenGL shader won't render Triangle

I'm working on this project (https://github.com/lupeeumeeu/WorldCraft). Everything works fine and I can change the background color, but it doesn't render the triangle. I tried setting tons and tons of breakpoints but couldn't find any problem. I think the problem should be around Triangle.cpp or the managers. I tried to follow in2gpu's tutorial, but I also modified it a little bit.
An image link to explain just a bit:
https://imgur.com/a/HN7t2
Main.cpp:
#include "..\WorldCraft\Core\Init\Init.h"
#include "Core\Managers\Scene_Manager.h"
#include "Core\Render\Triangle.h"
using namespace Core;
using namespace Init;
int main(int argc, char **argv)
{
WindowConfig windowconfig(std::string("WorldCraft"), 800, 600, 400, 200, true);//name, x, y, w, h, reshape
OpenGLVersion version(4, 5, true);//M.m version opengl, msaa
BufferConfig bufferconfig(true, true, true, true); // Buffers
Core::Init::Init::Initialize(windowconfig, version, bufferconfig, argc, argv);
Core::Managers::Scene_Manager* mainMenu = new Core::Managers::Scene_Manager();
Core::Init::Init::SetListener(mainMenu);
Core::Render::Triangle* triangle = new Core::Render::Triangle();
triangle->SetProgram(Core::Managers::Shader_Manager::GetShader("CommonShader"));
triangle->Create();
mainMenu->GetModels_Manager()->SetModel("triangle", triangle);
Core::Init::Init::Run();
delete mainMenu;
return 0;
}
Scene_Manager.cpp:
#include "Scene_Manager.h"
using namespace Core;
using namespace Managers;
Scene_Manager::Scene_Manager()
{
glEnable(GL_DEPTH_TEST);
shader_manager = new Shader_Manager();
shader_manager->CreateProgram("CommonShader", "Core//Shaders//Common//Vertex_Shader.glsl"
, "Core//Shaders//Common//Fragment_Shader.glsl");
view_matrix = glm::mat4(1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, -1.0f, 0.0f,
0.0f, 0.0f, 10.0f, 1.0f);
models_manager = new Models_Manager();
}
Scene_Manager::~Scene_Manager()
{
delete shader_manager;
delete models_manager;
}
void Scene_Manager::NotifyBeginFrame()
{
models_manager->Update();
}
void Scene_Manager::NotifyDisplayFrame()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearColor(0.0, 0.0, 0.0, 1.0);
models_manager->Draw();
models_manager->Draw(projection_matrix, view_matrix);
}
void Scene_Manager::NotifyEndFrame()
{
}
void Scene_Manager::NotifyReshape(int width, int height, int previous_width, int previous_height)
{
float ar = (float)glutGet(GLUT_WINDOW_WIDTH) / (float)glutGet(GLUT_WINDOW_HEIGHT);
float angle = 45.0f, near1 = 0.1f, far1 = 2000.0f;
projection_matrix[0][0] = 1.0f / (ar * tan(angle / 2.0f));
projection_matrix[1][1] = 1.0f / tan(angle / 2.0f);
projection_matrix[2][2] = (-near1 - far1) / (near1 - far1);
projection_matrix[2][3] = 1.0f;
projection_matrix[3][2] = 2.0f * near1 * far1 / (near1 - far1);
}
Core::Managers::Models_Manager* Scene_Manager::GetModels_Manager()
{
return models_manager;
}
Scene_Manager.h:
#pragma once
#include "Models_Manager.h"
#include "Shader_Manager.h"
#include "../Init/FrameNotifier.h"
namespace Core
{
namespace Managers
{
class Scene_Manager : public Init::FrameNotifier
{
public:
Scene_Manager();
~Scene_Manager();
virtual void NotifyBeginFrame();
virtual void NotifyDisplayFrame();
virtual void NotifyEndFrame();
virtual void NotifyReshape(int width, int height, int previous_width, int previous_height);
Managers::Models_Manager* GetModels_Manager();
private:
Core::Managers::Shader_Manager* shader_manager;
Core::Managers::Models_Manager* models_manager;
glm::mat4 projection_matrix;
glm::mat4 view_matrix;
};
}
}
Models_Manager.cpp:
#include "Models_Manager.h"
using namespace Core::Managers;
using namespace Core::Render;
Models_Manager::Models_Manager()
{
Triangle* triangle = new Triangle();
triangle->SetProgram(Shader_Manager::GetShader("CommonShader"));
triangle->Create();
gameModelList_NDC["triangle"] = triangle;
}
Models_Manager::~Models_Manager()
{
for (auto model : gameModelList)
{
delete model.second;
}
gameModelList.clear();
for (auto model : gameModelList_NDC)
{
delete model.second;
}
gameModelList_NDC.clear();
}
void Models_Manager::Update()
{
for (auto model : gameModelList)
{
model.second->Update();
}
for (auto model : gameModelList_NDC)
{
model.second->Update();
}
}
void Models_Manager::Draw()
{
for (auto model : gameModelList_NDC)
{
model.second->Draw();
}
}
void Models_Manager::Draw(const glm::mat4& projection_matrix, const glm::mat4& view_matrix)
{
for (auto model : gameModelList)
{
model.second->Draw(projection_matrix, view_matrix);
}
}
void Models_Manager::DeleteModel(const std::string& gameModelName)
{
IGameObject* model = gameModelList[gameModelName];
model->Destroy();
gameModelList.erase(gameModelName);
}
void Models_Manager::DeleteModel_NDC(const std::string& gameModelName)
{
IGameObject* model = gameModelList_NDC[gameModelName];
model->Destroy();
gameModelList_NDC.erase(gameModelName);
}
const IGameObject& Models_Manager::GetModel(const std::string& gameModelName) const
{
return (*gameModelList.at(gameModelName));
}
const IGameObject& Models_Manager::GetModel_NDC(const std::string& gameModelName) const
{
return (*gameModelList_NDC.at(gameModelName));
}
void Models_Manager::SetModel(const std::string& gameObjectName, IGameObject* gameObject)
{
gameModelList[gameObjectName.c_str()] = gameObject;
}
Triangle.cpp:
#include "Triangle.h"
using namespace Core;
using namespace Render;
Triangle::Triangle(){}
Triangle::~Triangle(){}
static void PrintError(GLenum errorCode)
{
switch (errorCode)
{
case GL_NO_ERROR:
break;
case GL_INVALID_ENUM:
std::cout << "An unacceptable value is specified for an enumerated argument.";
break;
case GL_INVALID_VALUE:
std::cout << "A numeric argument is out of range.";
break;
default:
break;
}
}
void Triangle::Create()
{
GLuint vao;
GLuint vbo;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
std::vector<VertexFormat> vertices;
vertices.push_back(VertexFormat(glm::vec3(0.25, -0.25, -1.0), glm::vec4(1, 0, 0, 1)));
vertices.push_back(VertexFormat(glm::vec3(-0.25, -0.25, -1.0), glm::vec4(0, 1, 0, 1)));
vertices.push_back(VertexFormat(glm::vec3(0.25, 0.25, -1.0), glm::vec4(0, 0, 1, 1)));
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
PrintError(glGetError());
glBufferData(GL_ARRAY_BUFFER, sizeof(VertexFormat) * 3, &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(VertexFormat), (void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, sizeof(VertexFormat), (void*)(offsetof(VertexFormat, VertexFormat::color)));
glBindVertexArray(0);
this->vao = vao;
this->vbos.push_back(vbo);
}
void Triangle::Update() {}
void Triangle::Draw()
{
glUseProgram(program);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
I found the error: it was the duplicated triangle, the one created without the camera configuration (view_matrix and projection_matrix).
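A sketch of that fix (an assumption about the intent: keep only the explicitly added triangle, which is drawn with the matrices):
Models_Manager::Models_Manager()
{
    // no default triangle in gameModelList_NDC; models are added explicitly
    // via SetModel(), so everything is drawn with projection/view matrices
}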