Currently working on a personal implementation of the Boids flocking simulation to test what I've learned about OpenGL, I've been trying to see how high I can push the number of boids drawn on screen while staying above 30 FPS.
Unfortunately, I've hit a road-block: at 2^16 boids and beyond (where each boid is three vertices of three floats each, with the z value kept at 0), the graphical output begins to flicker, with boids disappearing and reappearing on the screen.
Interestingly, this only seems to happen in the beginning stages of the app; the flickering seemingly stops once the boids form their "flocks" and, coincidentally, clump together visually.
I am also suspicious of the number 2^16, since it matches the 16-bit size of GLshort, so potentially the values are overflowing?
Here is the area I thought was most pertinent to the issue:
std::vector<float> vertices(flock.boids.size()*9, 0.f);
unsigned int VAO, VBO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices[0])*static_cast<uint>(vertices.size()), vertices.data(), GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3*sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
float FPS_sum = 0;
size_t frames = 0;
float CPS_sum = 0;
std::chrono::high_resolution_clock::time_point start;
// render loop
while(!glfwWindowShouldClose(window))
{
    processInput(window);

    // Begin CPS timer
    start = std::chrono::high_resolution_clock::now();

    // Computation step
    std::vector<point_bucket<Boid> > tree;
    point_bucket<Boid> base(0, 0, 2, 2, flock.boids);
    base_split(base, 16, tree, 20);
    for(auto& elm: tree)
    {
        flock.Update(elm.bucket);
    }
    flock.Mirror();
    CPS_sum += 1000.f/std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now()-start).count();

    // Rendering commands here
    glClearColor(0.2f, 0.3f, 0.2f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    glUseProgram(shaderProgram);

    // Begin FPS timer
    start = std::chrono::high_resolution_clock::now();
    glBindVertexArray(VAO);
    updateVertices(flock.boids, window, vertices);
    updateBuffer(VBO, 0, vertices.data(), sizeof(vertices[0])*static_cast<uint>(vertices.size()), GL_ARRAY_BUFFER);

    size_t draw_running_total = vertices.size()/3;
    GLint draw_offset = 0;
    while(draw_running_total > 0)
    {
        glDrawArrays(GL_TRIANGLES, draw_offset, draw_running_total > draw_size ? draw_size : draw_running_total);
        draw_running_total -= draw_size;
        draw_offset += draw_size;
    }

    // Check and call events and swap the buffers
    glfwSwapBuffers(window);
    glfwPollEvents();

    // PULL FPS FOR DRAW
    FPS_sum += 1000.f/std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::high_resolution_clock::now()-start).count();
    frames++;
    if(frames == 100)
    {
        std::cout << "Average across 100 frames... \n" << FPS_sum/frames << " FPS\n"
                  << CPS_sum/frames << " CPS\n";
        FPS_sum = 0;
        CPS_sum = 0;
        frames = 0;
    }
    // END PULL FOR FPS
}
void updateBuffer(uint &id, uint offset, void *data, uint size, GLenum shaderType)
{
    glBindBuffer(shaderType, id);
    glBufferSubData(shaderType, offset, size, data);
}
Looking online myself, I was able to find questions like...
https://stackoverflow.com/questions/24099139/c-opengl-flickering-issues
... which seem to deal only with extraneous buffer swapping, whereas I only swap buffers at the end of the draw loop.
A Reddit post addressed a similar issue and found a potential solution in an FBO, but still couldn't discover the root cause of the problem.
Any help would be greatly appreciated!
The GitHub repo for this project is also available for those interested.
I'm trying to create a simple particle system, but I'm running into a segmentation fault. The problem seems to be related to the number of vertex attributes I'm passing. One vertex has 3 floats, corresponding to the x, y and z coordinates, so if I want to create 1 million particles, I'll have an array of 3 million floats containing the position coordinates for each particle. The problem is that if I go above a certain number of particles, around 400,000, I get a segmentation fault.
Here's part of the code:
#define NP 1000000

int main(void)
{
    // Init GLFW
    MyGLFW myglfw(4, 5, W, H, "Particles");

    // Create shader program
    Shader shader("shaders/shader.vs", "shaders/shader.fs");

    // Define particle's vertex attrib
    float particleData[NP * 3];
    for(uint i = 0; i < NP * 3; i++)
    {
        // Creates a new particle with random position in the given range
        Particle p(-1.0f, 1.0f);
        particleData[i++] = p.pos[0];
        particleData[i++] = p.pos[1];
        particleData[i] = p.pos[2];
    }

    // Create Vertex Buffer Object (VBO) to store vertices into GPU memory
    uint VBO, VAO;
    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(particleData), particleData, GL_STATIC_DRAW);

    // Position
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);

    // Render loop
    while(!glfwWindowShouldClose(myglfw.getWindow()))
    {
        // Check input
        myglfw.processInput();

        // Rendering commands
        glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT);

        // Activate shader
        shader.use();

        // Draw through VAO
        glBindVertexArray(VAO);
        glDrawArrays(GL_POINTS, 0, NP * 3);
        glBindVertexArray(0);

        // GLFW: swap buffers and poll IO events (e.g. keys pressed, released, mouse moved, etc.)
        glfwSwapBuffers(myglfw.getWindow());
        glfwPollEvents();
    }

    // GLFW: terminate, clearing all previously allocated GLFW resources
    myglfw.terminate();
    return 0;
}
I can manage around 400,000-500,000 particles, and above that I get a segmentation fault. GDB says the segmentation signal comes right at the first line of main, which I don't understand. I also tried to set the number of particles by declaring a long int nr = 1000000 directly in the main function, but I get the same error; in that case GDB points at float particleData[nr];.
I'm writing and running my code on a Linux system using VS Code, my GPU is a GTX 1070 FE.
As per the comment, the use of...
float particleData[NP * 3];
is potentially causing a stack overflow. Rather than allocating such a large array on the stack, you should consider using std::vector instead. The following is your code with (hopefully) the minimal necessary modifications (look for #G.M.)...
#define NP 1000000

int main(void)
{
    // Init GLFW
    MyGLFW myglfw(4, 5, W, H, "Particles");

    // Create shader program
    Shader shader("shaders/shader.vs", "shaders/shader.fs");

    // Define particle's vertex attrib
    std::vector<float> particleData(NP * 3); /* #G.M. */
    for(uint i = 0; i < NP * 3; i++)
    {
        // Creates a new particle with random position in the given range
        Particle p(-1.0f, 1.0f);
        particleData[i++] = p.pos[0];
        particleData[i++] = p.pos[1];
        particleData[i] = p.pos[2];
    }

    // Create Vertex Buffer Object (VBO) to store vertices into GPU memory
    uint VBO, VAO;
    glGenVertexArrays(1, &VAO);
    glGenBuffers(1, &VBO);
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO);
    glBufferData(GL_ARRAY_BUFFER, sizeof(particleData[0]) * particleData.size(), particleData.data(), GL_STATIC_DRAW); /* #G.M. */

    // Position
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);

    // Render loop
    while(!glfwWindowShouldClose(myglfw.getWindow()))
    {
        // Check input
        myglfw.processInput();

        // Rendering commands
        glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
        glClear(GL_COLOR_BUFFER_BIT);

        // Activate shader
        shader.use();

        // Draw through VAO
        glBindVertexArray(VAO);
        glDrawArrays(GL_POINTS, 0, NP * 3);
        glBindVertexArray(0);

        // GLFW: swap buffers and poll IO events (e.g. keys pressed, released, mouse moved, etc.)
        glfwSwapBuffers(myglfw.getWindow());
        glfwPollEvents();
    }

    // GLFW: terminate, clearing all previously allocated GLFW resources
    myglfw.terminate();
    return 0;
}
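For what it's worth, any heap-based allocation sidesteps the same stack limit; a std::unique_ptr<float[]>, for example, would also work, though std::vector is usually the more convenient choice. A minimal sketch of that alternative (a drop-in for the corresponding lines inside main, not tested against your project):

#include <memory>

// Sketch: heap-allocate the raw array instead of using std::vector.
auto particleData = std::make_unique<float[]>(NP * 3);

// ... fill particleData[0 .. NP*3 - 1] exactly as before ...

// sizeof(particleData) no longer gives the array size, so compute it explicitly.
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * NP * 3, particleData.get(), GL_STATIC_DRAW);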
So I'm writing a chunk-based, procedurally generated terrain game and am running into two errors:
Basically, the way it works is that it generates chunks around the player position, once per game loop.
for (int i = RENDER_RADIUS; i >= 0; i--)
{
    for (int j = RENDER_RADIUS; j >= 0; j--)
    {
        terr.renderChunk(glm::ivec2(c->getXOff() + i, c->getZOff() + j), cubeShader);
        terr.renderChunk(glm::ivec2(c->getXOff() - i, c->getZOff() - j), cubeShader);
        terr.renderChunk(glm::ivec2(c->getXOff() - i, c->getZOff() + j), cubeShader);
        terr.renderChunk(glm::ivec2(c->getXOff() + i, c->getZOff() - j), cubeShader);
    }
}
In terr.renderChunk I am using an unordered_map that uses the chunk's position as the key and the chunk as the value. If the unordered_map doesn't find the chunk, then the position gets added to terr.updateList.
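A simplified sketch of that lookup (the names terr.world and terr.updateList match the code elsewhere in this question; "Terrain" stands in for terr's class, the find()-based check is assumed, and a suitable hash for the glm::ivec2 key is taken as given):

// Hypothetical sketch of what renderChunk does, as described above.
void Terrain::renderChunk(const glm::ivec2& pos, Shader& shader)
{
    auto it = world.find(pos);          // look the chunk up by its position key
    if (it != world.end())
        it->second->render(shader);     // chunk already generated: draw it
    else
        updateList.push_back(pos);      // not generated yet: queue it for updateChunk
}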
Then, back in the game loop:
if (!terr.updateList.empty())
{
    terr.updateChunk(terr.updateList[terr.updateList.size()-1]);
    terr.world[terr.updateList[terr.updateList.size()-1]]->render(cubeShader);
    terr.updateList.pop_back();
}
On a separate line, I'm ensuring that the player's current chunk is loaded as well.
To generate a chunk's VBO, I add the indices to the chunk's vector of points and then build it like so:
glGenVertexArrays(1, &this->VAO);
glBindVertexArray(this->VAO);
// vertex VBO
glGenBuffers(1, &this->VBO_VERT);
glBindBuffer(GL_ARRAY_BUFFER, this->VBO_VERT);
glBufferData(GL_ARRAY_BUFFER, this->points.size() * sizeof(glm::vec3), &this->points[0][0], GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(0);
// texture coords
glGenBuffers(1, &this->VBO_UV);
glBindBuffer(GL_ARRAY_BUFFER, this->VBO_UV);
glBufferData(GL_ARRAY_BUFFER, this->uvs.size() * sizeof(glm::vec2), &this->uvs[0][0], GL_STATIC_DRAW);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*)(0));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
Now, it all generally works, but I randomly get a segmentation fault and have debugged it down to my render function:
void Chunk::render(Shader shader)
{
    shader.setMat4("transform", offsetMatrix);
    shader.setFloat("transparency", 1.0f);
    glBindVertexArray(VAO);
    cout << "size " << points.size() << endl;
    glDrawArrays(GL_TRIANGLES, 0, points.size()); // RIGHT HERE CAUSES THE SEGFAULTS
    cout << "TEST2" << endl;
}
The segmentation fault seems to happen randomly; however, I believe it doesn't happen on new chunks but rather when going back over old ones.
My question is: is there anything specific to OpenGL/C++ that I'm unaware of that could be causing it?
The other error I'm getting, which may be related but which I've debugged less, is a chunk rendering error where random terrain gets rendered; when I walk into it, collision still works as if the terrain were where it should be.
I realize this is a long question, but any support is really appreciated!
Switching your render call to a renderChunk call looks like it would be a safer alternative. I'm not sure it would fix the segfault, but from what I can see it's a safer, and not much slower, bet.
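To illustrate one reading of "safer" (an assumption on my part): check that the chunk actually exists before rendering it, rather than going through operator[], which silently inserts a null value for a missing key. A sketch, reusing the names from the question:

// Hypothetical guard around the update-list processing shown in the question.
if (!terr.updateList.empty())
{
    glm::ivec2 pos = terr.updateList.back();
    terr.updateChunk(pos);

    auto it = terr.world.find(pos);        // avoid operator[] here
    if (it != terr.world.end() && it->second != nullptr)
        it->second->render(cubeShader);    // only draw a chunk that really exists

    terr.updateList.pop_back();
}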
I want to draw a cube and a sphere and apply a different texture to each.
I use Blender to create the scene and then export to an obj file, which then includes the vertices, normals, uvs and faces for both objects, as well as the textures.
I have created a routine which loads all the data from the obj file. This all works, as I can load the objects and display them, but with only one texture. As I say, I have gone through pages and pages of code and posts, and 99% of them only deal with one texture for one object; those that deal with multiple textures only deal with one object, or use a very old version of OpenGL.
The one thing I haven't tried is uniform sampler2D arrays in the fragment shader, but I haven't found an explanation of that, so I haven't tried it.
The code that I have is below:
ObjLoader *obj = new ObjLoader();
string _filepath = "objects\\" + _filename;
//bool res = obj->loadObjWithStaticColor(_filepath.c_str(), _vertices, _normals, vertex_colors, _colors, 1.0);
bool res = obj->loadObjWithTextures(_filepath.c_str(), _objects, _textures);
program = InitShader("shaders\\vshader.glsl", "shaders\\fshader.glsl");
glUseProgram(program);
GLuint vao_world_objects;
glGenVertexArrays(1, &vao_world_objects);
glBindVertexArray(vao_world_objects);
//GLuint vbo_world_objects;
//glGenBuffers(1, &vbo_world_objects);
//glBindBuffer(GL_ARRAY_BUFFER, vbo_world_objects);
NumVertices = _objects[_objects.size() - 1]._stop + 1;
for (size_t i = 0; i < _objects.size(); i++)
{
    _vertices.insert(_vertices.end(), _objects[i]._vertices.begin(), _objects[i]._vertices.end());
    _normals.insert(_normals.end(), _objects[i]._normals.begin(), _objects[i]._normals.end());
    _uvs.insert(_uvs.end(), _objects[i]._uvs.begin(), _objects[i]._uvs.end());
}
GLuint _vSize = _vertices.size() * sizeof(point4);
GLuint _nSize = _normals.size() * sizeof(point4);
GLuint _uSize = _uvs.size() * sizeof(point2);
GLuint _totalSize = _vSize + _uSize; // normals + vertices + uvs
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, _vSize, &_vertices[0], GL_STATIC_DRAW);
GLuint uvbuffer;
glGenBuffers(1, &uvbuffer);
glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
glBufferData(GL_ARRAY_BUFFER, _uSize, &_uvs[0], GL_STATIC_DRAW);
TextureID = glGetUniformLocation(program, "myTextureSampler");
TextureObjects = new GLuint[_textures.size()];
glGenTextures(_textures.size(), TextureObjects);
for (size_t i = 0; i < _textures.size(); i++)
{
    // "Bind" the newly created texture: all future texture functions will modify this texture
    glBindTexture(GL_TEXTURE_2D, TextureObjects[i]);

    // Give the image to OpenGL
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, _textures[i].width, _textures[i].height, 0, GL_BGR, GL_UNSIGNED_BYTE, _textures[i]._tex_data);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
}
for (size_t i = 0; i < _objects.size(); i++)
{
    if (i == 0)
    {
        glActiveTexture(GL_TEXTURE0);
    }
    else
    {
        glActiveTexture(GL_TEXTURE1);
    }
    glBindTexture(GL_TEXTURE_2D, TextureObjects[i]);

    GLuint _v_size = _objects[i]._vertices.size() * sizeof(point4);
    GLuint _u_size = _objects[i]._uvs.size() * sizeof(point2);

    GLuint vPosition = glGetAttribLocation(program, "vPosition");
    glEnableVertexAttribArray(vPosition);
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    if (i == 0)
    {
        glVertexAttribPointer(vPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
    }
    else
    {
        glVertexAttribPointer(vPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(_v_size));
    }

    GLuint vUV = glGetAttribLocation(program, "vUV");
    glEnableVertexAttribArray(vUV);
    glBindBuffer(GL_ARRAY_BUFFER, uvbuffer);
    if (i == 0)
    {
        glVertexAttribPointer(vUV, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
    }
    else
    {
        glVertexAttribPointer(vUV, 2, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(_u_size));
    }

    if (i == 0)
    {
        glUniform1i(TextureID, 0);
    }
    else
    {
        glUniform1i(TextureID, 1);
    }
}
_scale = Scale(zoom, zoom, zoom);
_projection = Perspective(45.0, 4.0 / 3.0, 0.1, 100.0);
_view = LookAt(point4(Camera.x, Camera.y, Camera.z, 0), point4(0, 0, 0, 0), point4(0, 1, 0, 0));
_model = mat4(1.0); // identity matrix
_mvp = _projection * _view * _model;
MVP = glGetUniformLocation(program, "MVP");
theta = glGetUniformLocation(program, "theta");
Zoom = glGetUniformLocation(program, "Zoom");
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_CULL_FACE);
glClearColor(1.0, 1.0, 1.0, 1.0);
I understand that I have to switch between the active textures when drawing an object but I can't figure out how.
UPDATE
#immibis Ok, I tried to do that yesterday but it didn't work, though it was late and I was highly frustrated. So, just to get my thinking correct here: do I have to create a buffer every time (glGenBuffers) and then fill it, activate the texture and then glDrawArrays, or do I just create the buffer once and then fill it every time with the different vertices and uvs for each object, set the offsets, and then call glDrawArrays for each object?
When I tried this originally I didn't know where the
glGetAttribLocation / glEnableVertexAttribArray / glBindBuffer
calls should go. So if I understand correctly, every time I do a transformation like rotating around the x axis, the buffers have to be filled again, so the code needs to go in the display function. Is that correct?
SOLVED
Ok, so thanks to immibis' comments, it got me looking in a different direction. I was staring the whole time at how the data was pumped into the arrays and never even looked at glDrawArrays. I searched the web again and came across a piece of code in a tutorial where the person explained glDrawArrays, and I saw that you can tell it what to draw.
So then this became easy, as I originally thought it would be. I changed my code back to pumping everything into the buffers, and since I have a start and stop property on my objects returned from my loader, it was really easy to tell glDrawArrays what to do.
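For anyone finding this later, roughly what that per-object draw ends up looking like (an illustrative sketch, not my exact code; the start/stop names are approximate and the stop index is assumed to be inclusive):

// One glDrawArrays call per object, using that object's range in the shared
// vertex buffer and binding its texture first.
glBindVertexArray(vao_world_objects);
for (size_t i = 0; i < _objects.size(); i++)
{
    glActiveTexture(GL_TEXTURE0);
    glBindTexture(GL_TEXTURE_2D, TextureObjects[i]);
    glUniform1i(TextureID, 0);

    GLint first  = _objects[i]._start;
    GLsizei count = _objects[i]._stop - _objects[i]._start + 1;
    glDrawArrays(GL_TRIANGLES, first, count);
}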
Thank you.
Well, the origins of this question lie here:
https://stackoverflow.com/questions/20820456/strange-behavior-in-application-using-glfw
But I decided to simplify the question, with less text, data and pictures (now I use triangles).
I've got two objects, and both objects use a VBO. I initialize every object using the constructor and an init method:
Character::Character() {
    glm::vec3 vert[] = {
        glm::vec3(-.5f, -.5f, 0.0f),
        glm::vec3(.5f, -.5f, 0.0f),
        glm::vec3(0.0f, .5f, 0.0f)
    };
    vertices.insert(vertices.begin(), vert, vert + 3);
}

void Character::init() {
    glGenBuffers(1, &vertexbuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
}

void Character::draw() {
    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
    glDrawArrays(GL_TRIANGLES, 0, vertices.size());
    glDisableVertexAttribArray(0);
}
In another class, glfwinitializer, I keep both objects in a std::vector<Character>. In the main function I create two objects and then push_back them into the vector.
The draw loop is simple - I iterate through the vector and call each object's draw method:
while(!glfwWindowShouldClose(window))
{
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    for(int i = 0; i < scene_items.size(); i++) {
        scene_items[i]->draw();
    }
    this->navigator();
    glfwSwapBuffers(window);
    glfwPollEvents();
}
The navigator method calculates the new position of objects when they are selected. In every method where I update the data of the vertices vector, I call
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
For example (the move_offset method is called from navigator):
void Character::move_offset(double x_offset, double y_offset) {
    for(int i = 0; i < vertices.size(); i++) {
        vertices[i].x += x_offset;
        vertices[i].y += y_offset;
    }
    glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
};
But the two triangles are not shown on the screen at the same time - when I select one object, the other object disappears. When I click on the disappeared triangle, it appears and the other one disappears. (There is also one triangle in the initial location, but it cannot be moved.)
Why? Is there a problem with the buffers?
EDIT: PROJECT REPRODUCING PROBLEM WITH ADDITIONAL LIBS
visual studio 2010 project (9 mb)
project with libs
I haven't looked at the full source code in the linked project, but from what you have pasted here, you seem not to call glBindBuffer() in your move_offset() method - thus overwriting the buffer of whatever object was last bound (probably the last one drawn in your loop).
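A sketch of the corresponding fix, assuming vertexbuffer is the same member used in init() and draw():

void Character::move_offset(double x_offset, double y_offset) {
    for(int i = 0; i < vertices.size(); i++) {
        vertices[i].x += x_offset;
        vertices[i].y += y_offset;
    }
    // Bind this object's own VBO first, so the upload goes to the right buffer
    // instead of whichever buffer happens to be bound from the last draw call.
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
}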
I recently switched from immediate mode and have a new rendering process. There must be something I am not understanding; I think it has something to do with the indices.
Here is my diagram: Region -> Mesh -> Polygon Array -> 3 vertex indices which reference the master list of vertices.
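To make the render code below easier to follow, here is a rough reconstruction of those structures as implied by how the code accesses them (field names match the code; the exact types, and any fields not touched here, are assumptions):

#include <vector>

// Assumed shapes, inferred from the render code below.
struct Vertex  { float x, y, z; float u, v; };                        // interleaved position + UV (possibly more fields)
struct Polygon { unsigned int vertIndex[3]; int tex; };               // one triangle plus its texture id
struct Mesh    { float min[3], max[3]; int polygonCount; Polygon* poly; };
struct Region  { bool dead; Mesh* meshptr; std::vector<int> visibility; };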
Here is my render code:
// Render the mesh
void WLD::render(GLuint* textures, long curRegion, CFrustum cfrustum)
{
    int num = 0;

    // Set up rendering states
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);

    // Set up my indices
    GLuint indices[3];

    // Cycle through the PVS
    while(num < regions[curRegion].visibility.size())
    {
        int i = regions[curRegion].visibility[num];

        // Make sure the region is not "dead"
        if(!regions[i].dead && regions[i].meshptr != NULL)
        {
            // Check to see if the mesh is in the frustum
            if(cfrustum.BoxInFrustum(regions[i].meshptr->min[0], regions[i].meshptr->min[2], regions[i].meshptr->min[1],
                                     regions[i].meshptr->max[0], regions[i].meshptr->max[2], regions[i].meshptr->max[1]))
            {
                // Cycle through every polygon in the mesh and render it
                for(int j = 0; j < regions[i].meshptr->polygonCount; j++)
                {
                    // Assign the index for the polygon to the index in the huge vertex array
                    // This, I think, is redundant
                    indices[0] = regions[i].meshptr->poly[j].vertIndex[0];
                    indices[1] = regions[i].meshptr->poly[j].vertIndex[1];
                    indices[2] = regions[i].meshptr->poly[j].vertIndex[2];

                    // Enable texturing and bind the appropriate texture
                    glEnable(GL_TEXTURE_2D);
                    glBindTexture(GL_TEXTURE_2D, textures[regions[i].meshptr->poly[j].tex]);

                    glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
                    glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);

                    // Draw
                    glDrawElements(GL_TRIANGLES, 3, GL_UNSIGNED_INT, indices);
                }
            }
        }
        num++;
    }

    // End of rendering - disable states
    glDisableClientState(GL_VERTEX_ARRAY);
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
}
Sorry if I left anything out. And I really appreciate feedback and help with this. I would even consider paying someone who is good with OpenGL and optimization to help me with this.
There is no point in using array rendering if you're only rendering 3 vertices at a time. The idea is to send thousands through with a single call. That is, you render a single "Polygon Array" or "Mesh" with one call.
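A rough sketch of that batching idea, which would replace the inner per-polygon loop in the render function above (member names mirror the question's code; the per-texture index lists are an assumption of mine, since the texture can't change in the middle of a single draw call):

#include <map>
#include <vector>

// Group the mesh's triangles by texture, then issue one glDrawElements call
// per texture instead of one per triangle.
std::map<int, std::vector<GLuint>> indicesByTexture;

for (int j = 0; j < regions[i].meshptr->polygonCount; j++)
{
    const auto& poly = regions[i].meshptr->poly[j];
    auto& idx = indicesByTexture[poly.tex];
    idx.push_back(poly.vertIndex[0]);
    idx.push_back(poly.vertIndex[1]);
    idx.push_back(poly.vertIndex[2]);
}

// Pointers and texturing state only need to be set once per mesh.
glVertexPointer(3, GL_FLOAT, sizeof(Vertex), &vertices[0].x);
glTexCoordPointer(2, GL_FLOAT, sizeof(Vertex), &vertices[0].u);
glEnable(GL_TEXTURE_2D);

for (const auto& kv : indicesByTexture)
{
    glBindTexture(GL_TEXTURE_2D, textures[kv.first]);
    // One draw call covering every triangle in the mesh that uses this texture.
    glDrawElements(GL_TRIANGLES, (GLsizei)kv.second.size(), GL_UNSIGNED_INT, kv.second.data());
}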