C++ Strange Access Violation with OpenGL [closed] - c++

Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 6 years ago.
Improve this question
I'm pretty new to C++ so I hope I can get some help here.
I am trying to port my game engine to C++, but C++ behaves a little bit... "strangely".
Following Situation:
if I run test1() It all works as it should.
main.cpp
#include <iostream>
#include "../headers/base.h"
#include "../headers/DemoGame.h"
#include "../headers/TestShader.h"
using namespace std;
using namespace engine;
void run(TestShader* t, GLuint VAO, GLFWwindow* w)
{
glfwPollEvents();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(t->progID);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glfwSwapBuffers(w);
}
void test1()
{
Window w = Window(800, 600, "test");
TestShader t = TestShader();
GLuint VAO, VBO;
GLfloat vertices[9] = {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
while (!glfwWindowShouldClose(w.getWindow()))
{
run(&t, VAO, w.getWindow());
}
}
void test2()
{
DemoGame game = DemoGame();
game.start();
}
int main()
{
test1();
return 0;
}
If i'm running test2() with following involved classes:
Engine.h
#pragma once
#ifndef H_ENGINE
#define H_ENGINE
#include "base.h"
namespace engine
{
class Engine
{
private:
bool running;
public:
void start()
{
init();
process();
}
void stop()
{
this->running = false;
}
private:
void process()
{
update();
}
public:
virtual void init() = 0;
virtual void update() = 0;
virtual void render() = 0;
virtual void terminate() = 0;
};
}
#endif
DemoGame.h
#pragma once
#ifndef DEMO_DEMO_GAME
#define DEMO_DEMO_GAME
#include "base.h"
#include "Window.h"
#include "Engine.h"
#include "TestShader.h"
using namespace engine;
class DemoGame : public Engine
{
public:
Window* w;
TestShader* t;
GLuint VBO, VAO;
public:
DemoGame() : Engine() { }
public:
void init();
void update();
void render();
void terminate();
};
#endif
DemoGame.cpp
#include "../headers/DemoGame.h"
#include <iostream>
using namespace std;
void DemoGame::init()
{
cout << "ping" << endl;
Window wi = Window(800, 600, "test");
w = &wi;
TestShader te = TestShader();
t = &te;
GLfloat vertices[9] = {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
while (!glfwWindowShouldClose(w->getWindow()))
{
render();
}
}
void DemoGame::update()
{
}
void DemoGame::render()
{
glfwPollEvents();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(t->progID);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glfwSwapBuffers(w->getWindow());
}
void DemoGame::terminate()
{
}
It works as well. But as you may see, Engine.h is supposed to control the main loop. If I change the code a little bit:
Engine.h
#pragma once
#ifndef H_ENGINE
#define H_ENGINE
#include "base.h"
namespace engine
{
class Engine
{
private:
bool running;
public:
void start()
{
init();
running = true;
while (running)
{
process();
}
}
void stop()
{
this->running = false;
}
private:
void process()
{
update();
}
public:
virtual void init() = 0;
virtual void update() = 0;
virtual void render() = 0;
virtual void terminate() = 0;
};
}
#endif
DemoGame.cpp
#include "../headers/DemoGame.h"
#include <iostream>
using namespace std;
void DemoGame::init()
{
cout << "ping" << endl;
Window wi = Window(800, 600, "test");
w = &wi;
TestShader te = TestShader();
t = &te;
GLfloat vertices[9] = {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
void DemoGame::update()
{
render();
}
void DemoGame::render()
{
glfwPollEvents();
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(t->progID);
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glfwSwapBuffers(w->getWindow());
}
void DemoGame::terminate()
{
}
Now all of a sudden I get an "Access Violation". The question is why?
The file "base.h" just contains
#define GLEW_STATIC
#include "GL/glew.h"
#include "GLFW/glfw3.h"
and the classes Window and TestShader shouldn't matter because they work in the first two examples. As I stated before, I'm pretty new to C++ and I just don't understand why this doesn't work. Can you please help me find out at least why it doesn't work — or better, help me solve the problem?
This is my second attempt to get a useful answer from StackOverflow by posting a question. Please do me a favour: consider reading the whole situation before you mark this question as a duplicate. The last time it wasn't a duplicate — the problem was entirely different.
Edit
As requested, the error message (sorry, I'm at work so the language is German):
Ausnahme ausgelöst bei 0x0126489D in GLFWGame.exe: 0xC0000005:
Zugriffsverletzung beim Lesen an Position 0xCCCCCEA4.
Falls ein Handler für diese Ausnahme vorhanden ist, kann das Programm
möglicherweise weiterhin sicher ausgeführt werden.
And I'll try to shorten the code to the most important.

You store addresses of stack objects that get deleted. For example,
Window wi = Window(800, 600, "test");
w = &wi;
Creates a local variable wi on the stack, which gets deleted automatically when it goes out of scope (which is the case at the end of the function). After that, w will point to an address that has already been freed, which will lead to big trouble when you try to access these variables later on, as you do here:
glfwSwapBuffers(w->getWindow());
If you want to create the window object on the heap, you have to use the following code in DemoGame::init():
w = new Window(800, 600, "test");
Don't forget to delete this object manually by calling delete w when you don't need it anymore. The same problem also occurs for the TestShader instance.
Side note: Window wi = Window(800, 600, "test"); is still a strange syntax when creating objects on the stack. The correct way would be Window wi(800, 600, "test"); Have a look at this post for why this makes a difference: Calling constructors in c++ without new
Edit: Your first example just works because you are calling the render function inside the init function, so the objects do not go out of scope. Storing pointers to local objects is still not good practice.

Your problem is here:
Window wi = Window(800, 600, "test");
w = &wi;
TestShader te = TestShader();
t = &te;
Both the instance of Window and the instance of TestShader are local variables that will get cleaned up as soon as they go out of scope (at the end of init), and hence remembering their addresses has no meaning. You will need to create those instances dynamically (new) or set them up within your class definition.

Related

gl_deletebuffers not working in a separate thread

In my rendering application, if I run the render function in the main loop then everything works fine, but if I move the rendering function to another thread then the destructor of the objects is not able to release the buffer.
when any object gets destroyed
The destructor for the objects are called but it seems as if gl_deletebuffers are not able to release the buffer.
How I came to this conclusion:
1) When I run everything in the main loop and I create an object, the VAO number for the object is 1.
2) After destroying the object, the next object's VAO is also assigned number 1.
///////////////////////////////////////////////////////////////////////////////////////////////////////////
1) But when the rendering part goes to a separate thread, the VAO number keeps on incrementing with every object.
2) System RAM usage also keeps increasing, and only when I close the application is the memory released.
3) The destructor for the objects is definitely called when I delete an object, but it seems as if the destructor has not been able to release the buffer.
//#define GLEW_STATIC
#include <gl\glew.h>
#include <glfw3.h>
#include "TreeModel.h"
#include "ui_WavefrontRenderer.h"
#include <QtWidgets/QApplication>
#include <QMessageBox>
#include <thread>
#define FPS_WANTED 60
const double limitFPS = 1.0 / 50.0;
Container cont;
const unsigned int SCR_WIDTH = 800;
const unsigned int SCR_HEIGHT = 600;
void framebuffer_size_callback(GLFWwindow* window, int width, int height);
void processInput(GLFWwindow *window);
GLFWwindow* window = nullptr;
void RenderThread(WavefrontRenderer* w)
{
glfwMakeContextCurrent(window);
GLenum GlewInitResult;
glewExperimental = GL_TRUE;
GlewInitResult = glewInit();
if (GLEW_OK != GlewInitResult) // Check if glew is initialized properly
{
QMessageBox msgBox;
msgBox.setText("Not able to Initialize Glew");
msgBox.exec();
glfwTerminate();
}
if (window == NULL)
{
QMessageBox msgBox;
msgBox.setText("Not able to create GL Window");
msgBox.exec();
glfwTerminate();
//return -1;
}
w->InitData();
glEnable(GL_MULTISAMPLE);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glBlendEquation(GL_FUNC_ADD);
while (!glfwWindowShouldClose(window))
{
// input
// -----
processInput(window);
// - Measure time
glClearColor(0.3, 0.3, 0.3, 0.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
w->render(); // DO the Rendering
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwTerminate();
std::terminate();
}
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
cont.SetName("RootItem");
TreeModel* model = new TreeModel("RootElement", &cont);
WavefrontRenderer w(model);
w.show();
glfwInit();
glfwWindowHint(GLFW_RESIZABLE, GL_TRUE);
glfwWindowHint(GLFW_SAMPLES, 4);
window = glfwCreateWindow(SCR_WIDTH, SCR_HEIGHT, "Renderer", nullptr, nullptr); // Create the render window
glfwMakeContextCurrent(0);
std::thread renderThread(RenderThread, &w);
renderThread.detach();
return a.exec();
return 0;
}
Class defination for a Object
the render function w->render() calls the draw() function of a object.
The Base class has a virtual destructor.
#include "Triangle.h"
#include "qdebug.h"
#include "qmessagebox.h"
float verticesTriangle[] = {
-50.0f, -50.0f, 0.0f, 0.0f , 0.0f,1.0f ,0.0f, 0.0f,
50.0f, -50.0f, 0.0f, 0.0f , 0.0f,1.0f ,1.0f, 0.0f,
0.0f, 50.0f, 0.0f, 0.0f, 0.0f,1.0f ,0.5f, 1.0f
};
Triangle::Triangle() : Geometry("TRIANGLE", true)
{
this->isInited = 0;
this->m_VBO = 0;
this->m_VAO = 0;
this->iNumsToDraw = 0;
this->isChanged = true;
}
Triangle::Triangle(const Triangle& triangle) : Geometry( triangle )
{
CleanUp();
this->isInited = 0;
this->m_VBO = 0;
this->m_VAO = 0;
this->iNumsToDraw = triangle.iNumsToDraw;
this->isChanged = true;
this->shader = ResourceManager::GetShader("BasicShader");
iEntries = 3;
}
Triangle& Triangle::operator=(const Triangle& triangle)
{
CleanUp();
Geometry::operator=(triangle);
this->isInited = 0;
this->m_VBO = 0;
this->m_VAO = 0;
this->iNumsToDraw = triangle.iNumsToDraw;
this->isChanged = true;
this->shader = ResourceManager::GetShader("BasicShader");
return (*this);
}
void Triangle::init()
{
glGenVertexArrays(1, &m_VAO);
glGenBuffers(1, &m_VBO);
glBindVertexArray(m_VAO);
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(verticesTriangle), verticesTriangle, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
isInited = true;
}
void Triangle::CleanUp()
{
if (!this->isInited)
{
return;
}
if (this->m_VAO)
glDeleteVertexArrays(1, &this->m_VAO);
if (this->m_VBO)
glDeleteBuffers(1, &this->m_VBO);
this->isInited = false;
}
void Triangle::draw()
{
if (isChanged)
{
init();
isChanged = false;
}
this->shader.Use();
glBindVertexArray(m_VAO);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
}
Triangle::~Triangle()
{
if (this->m_VAO)
glDeleteVertexArrays(1, &this->m_VAO);
if (this->m_VBO)
glDeleteBuffers(1, &this->m_VBO);
this->isInited = false;
}
OpenGL contexts are thread local state:
Every thread has exactly one or no OpenGL contexts active in it at any given time.
Each OpenGL context must be active in no or exactly one thread at any given time.
OpenGL contexts are not automatically migrated between thread.
I.e. if you don't explicitly unmake current the OpenGL context in question on the threads it's currently active, and subsequently make it active on the thread you're calling glDeleteBuffers on, the call on that will have no effect; on the context you expected it to have an effect on, at least.

OpenGL function in a class

I'm creating an OpenGL project which includes ParticleFactory class to generate particles in a scene.
To achieve that I have created a class:
#ifndef PARTICLE_H
#define PARTICLE_H
#include <glad/glad.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <iostream>
#include <vector>
#include "vertex.h"
#include <shader_s.h>
class ParticleFactory {
public:
Shader* shader;
std::vector<Particle> particles;
unsigned int VAO, VBO, EBO;
unsigned int nr_particles;
ParticleFactory(unsigned int nr_particles, unsigned int color){
Shader sh("shaders/particle.vs", "shaders/particle.fs");
shader = &sh;
this->nr_particles = nr_particles;
for(unsigned int i = 0; i < nr_particles; i++) {
float x = (rand()%200)/100.0 - 1;
float y = (rand()%200)/100.0 - 1;
float z = (rand()%200)/100.0 - 1;
particles.push_back(Particle(glm::vec4(x, y, z, 0.0)));
}
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(base_particle), base_particle, GL_STREAM_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(base_particle_indices), base_particle_indices, GL_STREAM_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(0);
}
void draw() {
shader->use();
shader->setMat4("model", model);
shader->setMat4("view", view);
shader->setMat4("projection", projection);
shader->setMat4("transform", transform);
for (int i=0; i<nr_particles; i++) {
shader->setVec4("offset", particles[i].offset);
glBindVertexArray(VAO);
glDrawElements(GL_TRIANGLES, 24, GL_UNSIGNED_INT, 0);
particles[i].offset += glm::vec4(0.0f, -0.001f, 0.0f, 0.0f);
if (particles[i].offset.y <= -1.0f) {
particles[i].offset.y = 1.0f;
}
}
}
};
#endif
My approach here is to first instantiate an object and then call .draw() in the main loop, but it doesn't seem to work and I don't know why.
Before we even get to the OpenGL part, you're creating a LOCAL object (sh) and then assigning the address of that to the shader member. At the end of the constructor, sh will be deleted, and thus the shader member will be pointing to … ? First, you'll need to use new/delete (or better yet, use std::unique_ptr< Shader >) to make sure that object is still around when you want to use it.
Try that FIRST!

Program crash when calling OpenGL functions

I'm trying to setup a game engine project. My visual studio project is setup so that I have an 'engine' project separate from my 'game' project. Then engine project is being compiled to a dll for the game project to use. I've already downloaded and setup glfw and glew to start using openGL. My problem is when ever I hit my first openGL function the program crashes. I know this has something to do with glewinit even though glew IS initializing successfully (no console errors). In my engine project, I have a window class where, upon window construction, glew should be setup:
Window.h
#pragma once
#include "GL\glew.h"
#include "GLFW\glfw3.h"
#if (_DEBUG)
#define LOG(x) printf(x)
#else
#define LOG(x)
#endif
namespace BlazeGraphics
{
class __declspec(dllexport) Window
{
public:
Window(short width, short height, const char* title);
~Window();
void Update();
void Clear() const;
bool Closed() const;
private:
int m_height;
int m_width;
const char* m_title;
GLFWwindow* m_window;
private:
Window(const Window& copy) {}
void operator=(const Window& copy) {}
};
}
Window.cpp (where glewinit() is called)
#include "Window.h"
#include <cstdio>
namespace BlazeGraphics
{
//Needed to define outside of the window class (not sure exactly why yet)
void WindowResize(GLFWwindow* window, int width, int height);
Window::Window(short width, short height, const char* title) :
m_width(width),
m_height(height),
m_title(title)
{
//InitializeWindow
{
if (!glfwInit())
{
LOG("Failed to initialize glfw!");
return;
};
m_window = glfwCreateWindow(m_width, m_height, m_title, NULL, NULL);
if (!m_window)
{
LOG("Failed to initialize glfw window!");
glfwTerminate();
return;
};
glfwMakeContextCurrent(m_window);
glfwSetWindowSizeCallback(m_window, WindowResize);
}
//IntializeGl
{
//This needs to be after two functions above (makecontextcurrent and setwindowresizecallback) or else glew will not initialize
**if (glewInit() != GLEW_OK)
{
LOG("Failed to initialize glew!");
}**
}
}
Window::~Window()
{
glfwTerminate();
}
void Window::Update()
{
glfwPollEvents();
glfwSwapBuffers(m_window);
}
void Window::Clear() const
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
}
//Returns a bool because glfwwindowShouldClose returns a nonzero number or zero
bool Window::Closed() const
{
//Made it equal to 1 to take away warning involving converting an int to bool
return glfwWindowShouldClose(m_window) == 1;
}
//Not part of window class so defined above
void WindowResize(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
}
Here is my main.cpp file which is found within my game project where I currently have my openGL functionality in global functions (just for now):
main.cpp
#include <iostream>
#include <array>
#include <fstream>
#include "GL\glew.h"
#include "GLFW\glfw3.h"
#include "../Engine/Source/Graphics/Window.h"
void initializeGLBuffers()
{
GLfloat triangle[] =
{
+0.0f, +0.1f, -0.0f,
0.0f, 1.0f, 0.0f,
-0.1f, -0.1f, 0.0f, //1
0.0f, 1.0f, 0.0f,
+0.1f, -0.1f, 0.0f, //2
0.0f, 1.0f, 0.0f,
};
GLuint bufferID;
glGenBuffers(1, &bufferID);
glBindBuffer(GL_ARRAY_BUFFER, bufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(triangle), triangle, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, (sizeof(GLfloat)) * 6, nullptr);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, (sizeof(GLfloat)) * 6, (char*)((sizeof(GLfloat)) * 3));
GLushort indices[] =
{
0,1,2
};
GLuint indexBufferID;
glGenBuffers(1, &indexBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
};
void installShaders()
{
//Create Shader
GLuint vertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
//Add source or text file to shader object
std::string temp = readShaderCode("VertexShaderCode.glsl");
const GLchar* adapter[1];
adapter[0] = temp.c_str();
glShaderSource(vertexShaderID, 1, adapter, 0);
temp = readShaderCode("FragmentShaderCode.glsl").c_str();
adapter[0] = temp.c_str();
glShaderSource(FragmentShaderID, 1, adapter, 0);
//Compile Shadaer
glCompileShader(vertexShaderID);
glCompileShader(FragmentShaderID);
if (!checkShaderStatus(vertexShaderID) || !checkShaderStatus(FragmentShaderID))
return;
//Create Program
GLuint programID = glCreateProgram();
glAttachShader(programID, vertexShaderID);
glAttachShader(programID, FragmentShaderID);
//Link Program
glLinkProgram(programID);
if (!checkProgramStatus(programID))
{
std::cout << "Failed to link program";
return;
}
//Use program
glUseProgram(programID);
}
int main()
{
BlazeGraphics::Window window(1280, 720, "MyGame");
initializeGLBuffers();
installShaders();
while (!window.Closed())
{
window.Clear();
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, 0);
window.Update();
};
return 0;
}
Now if I were to move the glewinit() code here in my main.cpp:
int main()
{
BlazeGraphics::Window window(1280, 720, "MyGame");
if (glewInit() != GLEW_OK)
{
LOG("Failed to initialize glew!");
}
initializeGLBuffers();
installShaders();
while (!window.Closed())
{
window.Clear();
glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_SHORT, 0);
window.Update();
};
return 0;
}
then my program compiles fine. Why does trying to initialize glew within engine.dll cause a program crash? Thanks for any help.
GLEW works by defining a function pointer as global variable for each OpenGL function. Let's look at glBindBuffer as an example:
#define GLEW_FUN_EXPORT GLEWAPI
typedef void (GLAPIENTRY * PFNGLBINDBUFFERPROC) (GLenum target, GLuint buffer);
GLEW_FUN_EXPORT PFNGLBINDBUFFERPROC __glewBindBuffer;
So we just have a __glewBindBuffer function pointer, which will be set to the correct address from your OpenGL implementation by glewInit.
To actually be able to write glBindBuffer, GLEW simply defines pre-processor macros mapping the GL functions to those function pointer variables:
#define glBindBuffer GLEW_GET_FUN(__glewBindBuffer);
Why does trying to initialize glew within engine.dll cause a program crash?
Because your engine.dll and your main application each have a separate set of all of these global variables. You would have to export all the __glew* variables from your engine DLL to be able to get access to the results of your glewInit call in engine.dll.

Problems with initiating different models in openGl with c++

I'm learning OpenGL and I'm trying to do something very simple. I have a graphics object called Model which contains a vector of GLfloats for vertices and has a draw function. In addition to this, I have an addVertex function which takes in 3 floats, and a draw function which binds the object's VAO and then unbinds it after drawing. I have no problem rendering one of these objects and adding points to it; however, I can't draw more than one of these at the same time without the program crashing and Visual Studio telling me "Frame not in module", which isn't helpful. Also, only the last Model object I create can be rendered; any object that comes before it will crash the program.
Here's the Model object's code:
#include "Model.h"
#include <iostream>
#include <string>
Model::Model() {
drawMode = 0;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glGenBuffers(1, &EBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, NULL, NULL, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, NULL, NULL, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
glBindVertexArray(0);
}
Model::~Model() {
glDeleteVertexArrays(1, &VAO);
glDeleteBuffers(1, &VBO);
glDeleteBuffers(1, &EBO);
}
void Model::addVertex(GLfloat x, GLfloat y, GLfloat z) {
vertices.push_back(x);
vertices.push_back(y);
vertices.push_back(z);
update();
}
void Model::addIndex(int i, int j, int k) {
indices.push_back(i);
indices.push_back(j);
indices.push_back(k);
}
void Model::setShader(GLuint& shader) {
shaderProgram = &shader;
}
void Model::clearModel() {
vertices.clear();
indices.clear();
}
int Model::sizeOfVertices() {
return sizeof(GLfloat)*vertices.size();
}
int Model::sizeOfIndices() {
return sizeof(GLuint)*(indices.size());
}
void Model::draw() {
glUseProgram(*shaderProgram);
GLuint transformLoc = glGetUniformLocation(*shaderProgram, "model");
glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(model));
glBindVertexArray(VAO);
switch (drawMode) {
case 0: glDrawArrays(GL_POINTS, 0, vertices.size() / 3);
break;
case 1: glDrawArrays(GL_LINE_STRIP, 0, vertices.size() / 3);
break;
case 2: glDrawArrays(GL_TRIANGLES, 0, vertices.size() / 3);
break;
case 3: glDrawArrays(GL_TRIANGLE_STRIP, 0, vertices.size() / 3);
break;
default: break;
}
glBindVertexArray(0);
}
void Model::setDrawMode(int type) {
drawMode = type;
}
void Model::move(glm::vec3 translation) {
model = glm::translate(model, translation);
}
void Model::rotate(float degrees,glm::vec3 axis) {
model = glm::rotate(model, degrees,axis);
}
void Model::update() {
glBindVertexArray(0);
glBindVertexArray(VAO);
glBufferData(GL_ARRAY_BUFFER, NULL, NULL, GL_STATIC_DRAW);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, NULL, NULL, GL_STATIC_DRAW);
glBufferData(GL_ARRAY_BUFFER, sizeOfVertices(), &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glBindVertexArray(0);
}
There is an EBO included in the Model class but I'm not using it yet as I want to isolate the problem.
Model.h:
#pragma once
#include <vector>;
#include <glew.h>;
#include "glm.hpp"
#include "gtc/matrix_transform.hpp"
#include "gtc/type_ptr.hpp"
#include "gtc/constants.hpp"
class Model {
public:
int drawMode;
GLuint VAO;
GLuint VBO;
GLuint EBO;
GLuint *shaderProgram;
std::vector<GLfloat> vertices;
std::vector<GLuint> indices;
glm::mat4 model;
Model();
~Model();
void addVertex(GLfloat, GLfloat, GLfloat);
void addIndex(int, int, int);
void setShader(GLuint&);
void clearModel();
int sizeOfVertices();
int sizeOfIndices();
void draw();
void setDrawMode(int);
void move(glm::vec3);
void rotate(float, glm::vec3);
void update();
};
Main class:
#include <iostream>
#include <string>
#include <fstream>
#include <vector>
#include "Callbacks.h"
#include "Shader.h"
#include "GlState.h"
#include "Model.h"
#include <glew.h>
#include <glfw3.h>
#include "glm.hpp"
#include "gtc/matrix_transform.hpp"
#include "gtc/type_ptr.hpp"
#include "gtc/constants.hpp"
Model *model;
Model *model2;
Model *model3;
void mainLoop(GLFWwindow* window) {
glfwPollEvents();
//RENDER UNDER HERE
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glPointSize(5);
Callbacks::update(model3);
model3->draw();
model2->draw();
model->draw();
glfwSwapBuffers(window);
}
int main() {
GLFWwindow* window = GlState::Initialize(800,600);
GLuint shaderProgram = Shader::makeProgram("vertex.vs", "fragment1.fs");
GLuint shaderProgram2 = Shader::makeProgram("vertex.vs", "fragment2.fs");
model = new Model();
model2 = new Model();
model3 = new Model();
model->setShader(shaderProgram);
model2->setShader(shaderProgram);
model3->setShader(shaderProgram2);
while (!glfwWindowShouldClose(window))
{
mainLoop(window);
}
delete(model);
delete(model2);
delete(model3);
glfwTerminate();
return 0;
}
I'll also include the important part of Callbacks, which is called by update(Model* m)
void Callbacks::checkMouse(Model* m) {
if (leftMousePressed) {
if (tics == 0) {
m->addVertex(2*(mouseXCurrent-0.5), 2*(0.5-mouseYCurrent), 0.0f);
}
tics++;
}
}
I haven't set the camera yet so its in default orthographic view.
Since I can only add points to the last Model object instantiated without crashing (model3 in this case), I assume it must be something to do with the way these models are being instantiated, but I can't figure out what! It's driving me nuts.
So an example of it crashing:
after the code segment in main:
model->setShader(shaderProgram);
model2->setShader(shaderProgram);
model3->setShader(shaderProgram2);
if you input
model->addVertex(0.1f,0.2f,0.0f);
or
model2->addVertex(0.1f,0.2f,0.0f);
the program will crash, however
model3->addVertex(0.1f,0.2f,0.0f);
works with no problems
The code will crash because you are accessing data outside of the buffers. In the constructor, you create buffers with size NULL:
glBufferData(GL_ARRAY_BUFFER, NULL, NULL, GL_STATIC_DRAW)
Note that the pointer type NULL is totally invalid for the size argument of that call, but that is not the issue here.
When you call the addVertex method, you append the data to your std::vector, but not to the VBO. You never call the update method which actually transfers the data into your buffer, so your attribute pointers still point to the 0-sized buffer, and when drawing more than 0 vertices from that, you're just in undefined-behavior land.
So I found what the problem is. Within the update method, I would bind the VAO and the problem was that I assumed this also bound the VBO for editing which apparently never happened. So when I called
glBufferData(GL_ARRAY_BUFFER, sizeOfVertices(), &vertices[0], GL_STATIC_DRAW);
I thought I was modifying the VBO within the VAO, but I was actually modifying the last VBO bound. The last time the VBO was bound was in the constructor, so this is why I was only able to modify the last Model object created, and the other ones would crash because I would bind their VAO and attempt to modify another Model's VBO.
At least I assume this was the problem as there is no error now because I'm calling
glBindBuffer(GL_ARRAY_BUFFER, VBO);
before I modify it.

Vertex Buffer issues

I recently created this VertexBuffer class to and added it to my opengl based engine; but it seems to fail in my main code, causing an "access violation at 0x00000000."...
Here is VertexBuffer.h :
#pragma once
#include "Type.h"
#include "Tags.h"
namespace Spiky {
namespace GLLayer {
template<>
struct GLObjDelAlloc<Tags::VertexBuffer> {
static void Allocate(GLTypeCT<GLType::Integer> count,
Tags::VertexBuffer::internal* names) {
glGenBuffers(count, names);
}
static void Delete(GLTypeCT<GLType::Integer> count, Tags::VertexBuffer::internal* names) {
glDeleteBuffers(count, names);
}
};
template<>
struct GLObjBind<Tags::VertexBuffer> {
private:
static Tags::VertexBuffer::internal _active;
public:
static const int ZERO_BUFFER = GL_NONE;
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
if(_active != name) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
_active = name;
}
}
};
Tags::VertexBuffer::internal
GLObjBind<Tags::VertexBuffer>::_active = 0;
//wrapper around GLObjDelAlloc & GLObjBind
class VertexBuffer {
public:
explicit VertexBuffer() {
GLObjDelAlloc<Tags::VertexBuffer>::Allocate(1, &handle_);
}
inline void Bind() {
GLObjBind<Tags::VertexBuffer>::BindRequest(handle_);
}
template<typename T>
static void BufferData(const GLTypeCT<GLType::Integer> count, GLTypeCT<T>* data) {
glBufferData(GL_ARRAY_BUFFER, count *
sizeof(data->stride), data, GL_STATIC_DRAW);
}
VertexBuffer& operator=(const VertexBuffer& other) = delete;
VertexBuffer(const VertexBuffer& other) = delete;
private:
Tags::VertexBuffer::internal handle_;
};
} //namespace GLLayer
} //namespace Spiky
And here is my main:
int main(int argc, char** args)
{
GLTypeCT<GLfloat> quadVertices[] = {
// Positions // Texture Coords
-1.0f, 1.0f, 0.0f, -1.0f, -1.0f, 0.0f,
1.0f, 1.0f, 0.0f, 1.0f, -1.0f, 0.0f,
};
// Setup plane VAO
glGenVertexArrays(1, &quadVAO);
VertexBuffer buffer{};
glBindVertexArray(quadVAO);
buffer.Bind();
VertexBuffer::BufferData(12, quadVertices);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat),
(GLvoid*)(0));
glBindVertexArray(0);
auto RenderQuad = [&] {
glBindVertexArray(quadVAO);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
};
//render calls later on...
}
Do you think the error comes from VertexBuffer ?
I see a few possible issues here.
But first, 1280 means GL_INVALID_ENUM, where did you get this? After which opengl call?
In this:
static void BindRequest(Tags::VertexBuffer::internal name = ZERO_BUFFER) {
if(_active != name) {
glBindBuffer(GL_ARRAY_BUFFER, 0);
_active = name;
}
You should probably pass glBindBuffer what you want to bind. glBindBuffer(GL_ARRAY_BUFFER, name)
I doubt this is causing it to crash but, when you call glVertexAttribPointer you're passing it 3 * sizeof(float) when it should be 6 * sizeof(float), why? Because the stride should be the size of your entire "Vertex" which in your case contains the positions and texcoords, which sums up to 6 floats.
And when you're calling glDrawArrays(GL_TRIANGLE_STRIP, 0, 4) you're saying that the count is 4, but as far as I can tell it's only 2.
The "access violation at 0x00000000", you get that if the function pointer to that function hasn't been retrieved, what are you using to get those pointers? I saw in the comments that you use SDL but that won't do any of that for you. You could do it yourself or use GLEW.