I am trying to approximate a curved surface using quadrilateral patches. I did it using straight forward rendering using GL_QUADS and specifying the four vertices of the quad patch.
Now I am trying to get some performance using vertex buffers and overlayed array (verNor) of vertices and normals. The problem is that I get some random shapes but not the correct shape I got previously.
Here I am putting my code:
GLenum err = glewInit();
if (GLEW_OK != err){
std::cout<<"Filed to Initialize GLEW :: "<<glewGetErrorString(err)<<std::endl;
}
verNor = new GLfloat [NA*NP*6]; // NA and NP are number of points in lets say x and y axis
indices = new GLuint [(NA)*(NP)*4]; // When the tube is cut an spread out.
// VBOs
glGenBuffers(1, &vbo_tube); // Ask the GPU driver for a buffer array. "vbo" now has the ID
glGenBuffers(1, &ibo_indices);
// For Vertices and Normals which are interleved
glBindBuffer(GL_ARRAY_BUFFER, vbo_tube);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 6*NA*NP, NULL, GL_STATIC_DRAW);
// Obtaining the pointer to the memory in graphics buffer
buffer_verNor = glMapBuffer(GL_ARRAY_BUFFER,GL_WRITE_ONLY);
// For Indices
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_indices);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(int) * 4*(NA-1)*(NP-1), NULL, GL_STATIC_DRAW);
buffer_indices = glMapBuffer(GL_ELEMENT_ARRAY_BUFFER,GL_WRITE_ONLY);
// Calculate the vertices of the points around the tube. Correctness guarenteed because I can draw exactly what I wanted
// using normal stright forward GL_QUADS that is drawing quad by quad and with out any VBOs
// Calculated vertices are stored in vPoints.
for (int i=0; i<NP; i++) {
for (int j=0; j<NA; j++) {
// Calculate the normals of each and every point above and store them in v3
// Storing the vertices
verNor[6*( (i)*NA+(j) )+0] = (GLfloat)vPoints[i*NA+j].GetX();
verNor[6*( (i)*NA+(j) )+1] = (GLfloat)vPoints[i*NA+j].GetY();
verNor[6*( (i)*NA+(j) )+2] = (GLfloat)vPoints[i*NA+j].GetZ();
// Storing the Normals
verNor[6*((i-1)*NA+(j-1))+3] = (GLfloat)v3.GetX();
verNor[6*((i-1)*NA+(j-1))+4] = (GLfloat)v3.GetY();
verNor[6*((i-1)*NA+(j-1))+5] = (GLfloat)v3.GetZ();
// Calculating the indices which form the quad
indices[4*((i)*NA+(j))+0] = (GLuint) (i)*NA+j ;
indices[4*((i)*NA+(j))+1] = (GLuint) (i+1)*NA+j ;
indices[4*((i)*NA+(j))+2] = (GLuint) (i+1)*NA+j+1 ;
indices[4*((i)*NA+(j))+3] = (GLuint) (i)*NA+j+1 ;
}
}
memcpy(buffer_verNor, verNor, 6*(NA)*(NP));
glUnmapBuffer(GL_ARRAY_BUFFER); // Unmapping the buffer
memcpy(buffer_indices, indices, 4*(NA-1)*(NP-1));
glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER);
glEnable(GL_LIGHTING);
// Performing the Vertex Buffer Stuff
// For Vertices and Normals
glBindBuffer(GL_ARRAY_BUFFER, vbo_tube);
glVertexPointer( 3, GL_FLOAT, 6*sizeof(GLfloat), (GLvoid*)((char*)NULL + 0*sizeof(GLfloat)) );
glNormalPointer( GL_FLOAT, 6*sizeof(GLfloat), (GLvoid*)(((char*)NULL)+3*sizeof(GLfloat)) );
// For Indices
// Mapping the indices_vbo memory here
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_indices);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*4*(NA-1)*(NP-1), indices, GL_STATIC_DRAW);
// Enabling all the buffers and drawing the quad patches
glBindBuffer(GL_ARRAY_BUFFER, vbo_tube);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo_indices);
// Enabling normals and vertices to draw
glEnableClientState (GL_NORMAL_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
// Drawing the patches
glDrawElements(GL_QUADS, (NA-1)*(NP-1), GL_UNSIGNED_INT,(GLvoid*)((char*)NULL));
// Disabling the buffer objects for safety
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
glDeleteBuffers(1, &vbo_tube);
glDeleteBuffers(1, &ibo_indices);
The gird has NA by NP points so I have to draw (NP-1)*(NA-1) quads.
Also I can only get some thing(but not correct) drawn only when I give wrong offsets and stride in glVertexPointer() and glNormalPointer() function. Correct ones i think are
vertexPointer :: Stride - 6*sizeof(GLfloat) , offset - 0(last argument)
normalPointer :: Stride - 6*sizeof(GLfloat) , offset - 3*sizeof(GLfloat)
Related
Closed. This question needs debugging details. It is not currently accepting answers.
Edit the question to include desired behavior, a specific problem or error, and the shortest code necessary to reproduce the problem. This will help others answer the question.
Closed 1 year ago.
Improve this question
I'm trying to create a simple particle system but I'm facing the segmentation fault error. My problem seems to be related to the number of vertex attributes I'm passing. One vertex has 3 floats, corresponding to x, y and z coordinates, so if I want to create 1 million particles, I'll have an array of 3 million floats containing the positions coordinates for each particle. The problem is that if I go above a certain number of particles, like around 400000, I get a segmentation fault.
Here's part of the code:
#define NP 1000000
int main(void)
{
// Init GLFW
MyGLFW myglfw(4, 5, W, H, "Particles");
// Create shader program
Shader shader("shaders/shader.vs", "shaders/shader.fs");
// Define particle's vertex attrib
float particleData[NP * 3];
for(uint i = 0; i < NP * 3; i++)
{
// Creates a new particle with random position in the given range
Particle p(-1.0f, 1.0f);
particleData[i++] = p.pos[0];
particleData[i++] = p.pos[1];
particleData[i] = p.pos[2];
}
// Create Vertex Buffer Object (VBO) to store vertices into GPU memory
uint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(particleData), particleData, GL_STATIC_DRAW);
// Position
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
// Render loop
while(!glfwWindowShouldClose(myglfw.getWindow()))
{
// Check input
myglfw.processInput();
// Rendering commands
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Activate shader
shader.use();
// Draw triangle through VAO
glBindVertexArray(VAO);
glDrawArrays(GL_POINTS, 0, NP * 3);
glBindVertexArray(0);
// GLFW: swap buffers and poll IO events (e.g. keys pressed, released, mouse moved, etc.)
glfwSwapBuffers(myglfw.getWindow());
glfwPollEvents();
}
// GLFW: terminate, clearing all previously allocated GLFW resources
myglfw.terminate();
return 0;
}
I manage to write around 400000-500000 particles and above that I get a segmentation fault. GDB says that the segmentation signal comes right at the first line of the main, which I don't understand. I also tried to set the number of particles by setting a long int nr = 1000000 directly in the main function, but I get the same error, and in this way GDB gives me an error at float particleData[nr];.
I'm writing and running my code on a Linux system using VS Code, my GPU is a GTX 1070 FE.
As per the comment, the use of...
float particleData[NP * 3];
is, potentially, causing a stack overflow. Rather than allocate such a large array on the stack you should consider using std::vector instead. The following is your code with (hopefully) the minimal necessary modifications (look for #G.M. )...
#define NP 1000000
int main(void)
{
// Init GLFW
MyGLFW myglfw(4, 5, W, H, "Particles");
// Create shader program
Shader shader("shaders/shader.vs", "shaders/shader.fs");
// Define particle's vertex attrib
std::vector<float> particleData(NP * 3); /* #G.M. */
for(uint i = 0; i < NP * 3; i++)
{
// Creates a new particle with random position in the given range
Particle p(-1.0f, 1.0f);
particleData[i++] = p.pos[0];
particleData[i++] = p.pos[1];
particleData[i] = p.pos[2];
}
// Create Vertex Buffer Object (VBO) to store vertices into GPU memory
uint VBO, VAO;
glGenVertexArrays(1, &VAO);
glGenBuffers(1, &VBO);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(particleData[0]) * particleData.size(), particleData.data(), GL_STATIC_DRAW); /* #G.M. */
// Position
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
// Render loop
while(!glfwWindowShouldClose(myglfw.getWindow()))
{
// Check input
myglfw.processInput();
// Rendering commands
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
// Activate shader
shader.use();
// Draw triangle through VAO
glBindVertexArray(VAO);
glDrawArrays(GL_POINTS, 0, NP * 3);
glBindVertexArray(0);
// GLFW: swap buffers and poll IO events (e.g. keys pressed, released, mouse moved, etc.)
glfwSwapBuffers(myglfw.getWindow());
glfwPollEvents();
}
// GLFW: terminate, clearing all previously allocated GLFW resources
myglfw.terminate();
return 0;
I have an array of 131072 values to draw in opengl with shaders. The coordinate of each point is calculated with the indice of the value, but i can't draw that. Now i have an error in the glDrawArrays.
This is part of my code set the vao and vbo, imagen is a CGfloat pointer with the data;
int pixels = 131072;
// Create vertex array objects
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
// Create vertex buffers
glGenBuffers(1, &vbo);
// VBO for coordinates of first square
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER,
pixels * sizeof(GLfloat),
imagen,
GL_STATIC_DRAW);
glVertexAttribPointer(0, pixels, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
and this is my display function:
void display(void) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindVertexArray(vao);
glDrawArrays(GL_POINTS, 0, 1);
glBindVertexArray(0);
glutSwapBuffers();
glutPostRedisplay();
}
if i pass an array to shader how can handle the array to calculate the coordinates with the index of each value???
Edit
This is how calculate the coordinates of each point with the index of the array, if i have one cube of 64x64x32 pixels i do this:
XX = 64;
YY = 64;
ZZ = 32;
x = index % XX;
y = (index / XX) % YY;
z = (int) floor((double) index / (XX * YY));
And with the value of the each element of the array calculate the color of that point
Edit 2
This is the image that i get when i draw all points and i need fill this object and get a volume
I have a working Vertex-Buffer-Object but I need to add the normals.
The normales are stored in the same array as the vertex positons. They are interleaved
Vx Vy Vz Nx Ny Nz
This is my code so far:
GLfloat values[NUM_POINTS*3 + NUM_POINTS*3];
void initScene() {
for(int i = 0; i < (NUM_POINTS) ; i = i+6){
values[i+0] = bunny[i];
values[i+1] = bunny[i+1];
values[i+2] = bunny[i+2];
values[i+3] = normals[i];
values[i+4] = normals[i+1];
values[i+5] = normals[i+2];
}
glGenVertexArrays(1,&bunnyVAO);
glBindVertexArray(bunnyVAO);
glGenBuffers(1, &bunnyVBO);
glBindBuffer(GL_ARRAY_BUFFER, bunnyVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(bunny), bunny, GL_STATIC_DRAW);
glVertexAttribPointer(0,3, GL_FLOAT, GL_FALSE, 0,0);
glEnableVertexAttribArray(0);
glGenBuffers(1, &bunnyIBO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, bunnyIBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(triangles), triangles, GL_STATIC_DRAW);
// unbind active buffers //
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
void renderScene() {
if (bunnyVBO != 0) {
// x: bind VAO //
glEnableClientState(GL_VERTEX_ARRAY);
glBindVertexArray(bunnyVAO);
glDrawElements(GL_TRIANGLES, NUM_TRIANGLES, GL_UNSIGNED_INT, NULL);
glDisableClientState(GL_VERTEX_ARRAY);
// unbind active buffers //
glBindVertexArray(0);
}
}
I can see something on the screen but it is not right as the normals are not used correctly...
How can I use the values array correctly connected with my code so far.
You need to call glVertexAttribPointer two times, once for the vertices and once for the normals. This is how you tell OpenGL how your data is layed out inside your vertex buffer.
// Vertices consist of 3 floats, occurring every 24 bytes (6 floats),
// starting at byte 0.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 24, 0);
// Normals consist of 3 floats, occurring every 24 bytes starting at byte 12.
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 24, 12);
This is assuming that your normal attribute in your shader has an index of 1.
I decided to import Wavefront .OBJ format to a test-scene that I'm working on. I get the model (vertices) to be in the right place and it displays fine. When I then apply a texture a lot of things looks distorted. I checked my Maya scene (there it looks good), and the object has many more uv-coordinates than vertex positions (this is what makes the scene looks weird in OpenGL, is my guess).
How would I go about loading a scene like that. Do I need to duplicate vertices and how do I store it in the vertex-buffer object?
You are right that you have to duplicate the vertices.
In addition to that you have to sort them in draw order, meaning that you have to order the vertices with the same offsets as the texture coordinates and normals.
basically you'll need this kind of structure:
float *verts = {v1_x,v1_y,v1_z,v1_w,v2_x,v2_y,v2_z,v2_w,...};
float *normals = {n1_x,n1_y,n1_z,n2_x,n2_y,n2_z,...};
float *texcoords = {t1_u,t1_v,t1_w,t2_u,t2_v,t2_w,...};
This however would mean that you have at least 108bytes per Triangle.
3(vert,norm,tex)
*3(xyz/uvw)
*3(points in tri)
*4(bytes in a float))
-----------------------
= 108
You can significantly reduce that number by only duplicating the vertices that actually are duplicate (have identical texture coordinate,vertices and normals meaning: smoothed normals and no UV borders) and using an Index Buffer Object to set the draw order.
I faced the same problem recently in a small project and I just split the models along the hard-edges and UV-Shell borders therefore creating only the necessary duplicate Vertices. Then I used the glm.h and glm.cpp from Nate Robins and copied/sorted the normals and texture coordinates in the same order as the vertices.
Then setup the VBO and IBO:
//this is for Data that does not change dynamically
//GL_DYNAMIC_DRAW and others are available
GLuint mDrawMode = GL_STATIC_DRAW;
//////////////////////////////////////////////////////////
//Setup the VBO
//////////////////////////////////////////////////////////
GLuint mId;
glGenBuffers(1, &mId);
glBindBuffer(GL_ARRAY_BUFFER, mId);
glBufferData(GL_ARRAY_BUFFER,
mMaxNumberOfVertices * (mVertexBlockSize + mNormalBlockSize + mColorBlockSize + mTexCoordBlockSize),
0,
mDrawMode);
glBufferSubData(GL_ARRAY_BUFFER, mVertexOffset, numberOfVertsToStore * mVertexBlockSize, vertices);
glBufferSubData(GL_ARRAY_BUFFER, mNormalOffset, numberOfVertsToStore * mNormalBlockSize, normals);
glBufferSubData(GL_ARRAY_BUFFER, mColorOffset, numberOfVertsToStore * mColorBlockSize, colors);
glBufferSubData(GL_ARRAY_BUFFER, mTexCoordOffset, numberOfVertsToStore * mTexCoordBlockSize, texCoords);
//////////////////////////////////////////////////////////
//Setup the IBO
//////////////////////////////////////////////////////////
GLuint IBOId;
glGenBuffers(1, &IBOId);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBOId);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, mMaxNumberOfIndices * sizeof(GLuint), 0, mDrawMode);
glBufferSubData(GL_ELEMENT_ARRAY_BUFFER, 0, numberOfIndicesToStore * sizeof(GLuint), indices);
//////////////////////////////////////////////////////////
//This is how to draw the object
//////////////////////////////////////////////////////////
glBindBuffer(GL_ARRAY_BUFFER, mId);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, IBOId);
//Enables and Disables are only necessary each draw
//when they change between objects
glEnableClientState(GL_VERTEX_ARRAY);
glVertexPointer(mVertexComponents, GL_FLOAT, 0, (void*)mVertexOffset);
if(mNormalBlockSize){
glEnableClientState(GL_NORMAL_ARRAY);
glNormalPointer(GL_FLOAT, 0, (void*)mNormalOffset);
}
if(mColorBlockSize){
glEnableClientState(GL_COLOR_ARRAY);
glColorPointer(mColorComponents, GL_FLOAT, 0, (void*)mColorOffset);
}
if(mTexCoordBlockSize){
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glTexCoordPointer(mTexCoordComponents, GL_FLOAT, 0, (void*)mTexCoordOffset);
}
glDrawRangeElements(primMode,
idFirstVertex,
idLastVertex,
idLastVertex - idFirstVertex + 1,
mAttachedIndexBuffer->getDataType(),
0);
if(mTexCoordBlockSize)
glDisableClientState(GL_TEXTURE_COORD_ARRAY);
if(mColorBlockSize)
glDisableClientState(GL_COLOR_ARRAY);
if(mNormalBlockSize)
glDisableClientState(GL_NORMAL_ARRAY);
glDisableClientState(GL_VERTEX_ARRAY);
I have trouble rendering some geometry by using a vertex buffer object. I intend to draw a plane of points, so basically one vertex at every discrete position in my space. However, I cannot render that plane, as every time I call glDrawElements(...), application crashes returning an access violation exception. There must be some mistake while initialization, I guess.
This is what I have so far:
#define SPACE_X 512
#define SPACE_Z 512
typedef struct{
GLfloat x, y, z; // position
GLfloat nx, ny, nz; // normals
GLfloat r, g, b, a; // colors
} Vertex;
typedef struct{
GLuint i; // index
} Index;
// create vertex buffer
GLuint vertexBufferObject;
glGenBuffers(1, &vertexBufferObject);
// create index buffer
GLuint indexBufferObject;
glGenBuffers(1, &indexBufferObject);
// determine number of vertices / primitives
const int numberOfVertices = SPACE_X * SPACE_Z;
const int numberOfPrimitives = numberOfVertices; // As I'm going to render GL_POINTS, number of primitives is the same as number of vertices
// create vertex array
Vertex* vertexArray = new Vertex[numberOfVertices];
// create index array
Index* indexArray = new Index[numberOfPrimitives];
// create planes (vertex array)
// color of the vertices is red for now
int index = -1;
for(GLfloat x = -SPACE_X / 2; x < SPACE_X / 2; x++) {
for(GLfloat z = -SPACE_Z / 2; z < SPACE_Z / 2; z++) {
index++;
vertexArray[index].x = x;
vertexArray[index].y = 0.0f;
vertexArray[index].z = z;
vertexArray[index].nx = 0.0f;
vertexArray[index].ny = 0.0f;
vertexArray[index].nz = 1.0f;
vertexArray[index].r = 1.0;
vertexArray[index].g = 0.0;
vertexArray[index].b = 0.0;
vertexArray[index].a = 1.0;
}
}
// bind vertex buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
// buffer vertex array
glBufferData(GL_ARRAY_BUFFER, numberOfVertices * sizeof(Vertex), vertexArray, GL_DTREAM_DRAW);
// bind vertex buffer again
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferObject);
// enable attrib index 0 (positions)
glEnableVertexAttribArray(0);
// pass positions in
glVertexAttribPointer((GLuint)0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), vertexArray);
// enable attribute index 1 (normals)
glEnableVertexAttribArray(1);
// pass normals in
glVertexAttribPointer((GLuint)1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), &vertexArray[0].nx);
// enable attribute index 2 (colors)
glEnableVertexAttribArray(2);
// pass colors in
glVertexAttribPointer((GLuint)2, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), &vertexArray[0].r);
// create index array
for(GLunit i = 0; i < numberOfPrimitives; i++) {
indexArray[i].i = i;
}
// bind buffer
glBindBuffer(GL_ELEMENET_ARRAY_BUFFER, indexBufferObject);
// buffer indices
glBufferData(GL_ELEMENET_ARRAY_BUFFER, numberOfPrimitives * sizeof(Index), indexArray, GL_STREAM_DRAW);
// bind buffer again
glBindBuffer(GL_ELEMENET_ARRAY_BUFFER, indexBufferObject);
// AND HERE IT CRASHES!
// draw plane of GL_POINTS
glDrawElements(GL_POINTS, numberOfPrimitives, GL_UNSIGNED_INT, indexArray);
// bind default buffers
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// delete vertex / index buffers
glDeleteBuffers(1, &vertexBufferObject);
glDeleteBuffers(1, &indexBufferObject);
delete[] vertexArray;
vertexArray = NULL;
delete[] indexArray;
indexArray = NULL;
When you are using buffer objects, the last parameters in the gl*Pointer and 4th parameter in glDrawElements are no longer addresses in main memory (yours still are!), but offsets into the buffer objects. Make sure to compute these offsets in bytes! The "offsetof" macro is very helpful there.
Look at the second example on this page and compare it to what you did: http://www.opengl.org/wiki/VBO_-_just_examples
And you have one typo: GL_DTREAM_DRAW.
The method glEnableClientState(...) is deprecated! Sorry, for some reason I had overseen that fact.