Modifying a Shader Storage Buffer Object from within a Vertex Shader - c++

In my base program (C++/OpenGL 4.5) I have copied the content of the Vertex Buffer to a Shader Storage Buffer (SSBO):
float* buffer = (float*) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, ssbo[2]);
glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(GLfloat)*size,buffer, GL_DYNAMIC_DRAW);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, 0);
glUnmapBuffer(GL_ARRAY_BUFFER);
In the Vertex Shader this data is bound to an array:
#version 430
#extension GL_ARB_shader_storage_buffer_object : require
layout(shared, binding = 3) buffer storage
{
float array[];
}; // note: a GLSL interface block must end with a semicolon
But when I'm trying to overwrite an array entry in the main function like this:
array[index_in_bounds] = 4.2;
nothing happens.
What am I doing wrong? Can I change the buffer from within the Vertex Shader? Is this only possible in a Geometry Shader? Do I have to do this with Transform Feedback (that I have never used before)?
edit:
I'm mapping the buffers for test purposes in my main program, just to see if the data changes:
float* buffer = (float*) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
float* ssbo = (float*) glMapNamedBuffer(3, GL_READ_ONLY);
for(int i = 0; i < SIZE_OF_BUFFERS; i++)
printf("% 5f | % 5f\n", ssbo[i], buffer[i]);
glUnmapNamedBuffer(3);
glUnmapBuffer(GL_ARRAY_BUFFER);

Okay, I have found the problem using the red book. I have not bound the buffer correctly and binding the buffer base has to happen after buffering the data:
float* buffer = (float*) glMapBuffer(GL_ARRAY_BUFFER, GL_READ_ONLY);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo); // bind buffer (glBindBuffer takes the buffer name itself, not its address)
// switched these two:
glBufferData(GL_SHADER_STORAGE_BUFFER, sizeof(GLfloat)*size,buffer, GL_DYNAMIC_DRAW);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 3, ssbo);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); // unbind buffer
glUnmapBuffer(GL_ARRAY_BUFFER);

Related

Rendering multiple meshes (same VBO) + all indices in same EBO (OpenGL)

I'm trying to minimize memory fragmentation on the GPU by storing all vertex data in the same VBO.
After parsing the .obj nodes with Assimp and extracting all the mesh data, I've alloc'd two large buffers that can contain everything and dumped everything in them (one vertex array and another indices array). Rendering all the data works fine with glDrawArrays, but this is not the result I'm trying to obtain.
// One interleaved vertex as stored in the combined VBO.
// NOTE(review): the glVertexAttribPointer offsets used in the upload code
// (0, sizeof(glm::vec3), 2*sizeof(glm::vec3)) assume there is no padding
// between these members — confirm with a static_assert on sizeof(vertex_data).
struct vertex_data
{
glm::vec3 Position; // attribute location 0
glm::vec3 Normal; // attribute location 1
glm::vec2 TexCoords; // attribute location 2
};
uint32_t TotalVertices = 0;
uint32_t TotalIndices = 0;
uint32_t TotalMeshes = Model->NextMeshToLoad;
if (TotalMeshes != 0)
{
for (uint32_t MeshOffset = 0;
MeshOffset < TotalMeshes;
++MeshOffset)
{
mesh *Mesh = Model->Meshes + MeshOffset;
uint32_t MeshTotalVertices = Mesh->NextVertexData;
uint32_t MeshTotalIndices = Mesh->NextIndice;
TotalVertices += MeshTotalVertices;
TotalIndices += MeshTotalIndices;
}
vertex_data *CombinedVertexData = PushArray(Arena, TotalVertices, vertex_data);
uint32_t *CombinedIndices = PushArray(Arena, TotalIndices, uint32_t);
uint32_t CombinedVertexDataOffset = 0;
uint32_t CombinedIndicesOffset = 0;
for (uint32_t MeshOffset = 0;
MeshOffset < TotalMeshes;
++MeshOffset)
{
mesh *Mesh = Model->Meshes + MeshOffset;
uint32_t MeshTotalVertices = Mesh->NextVertexData;
memcpy_s(CombinedVertexData + CombinedVertexDataOffset, TotalVertices * sizeof(vertex_data),
Mesh->VertexData, MeshTotalVertices * sizeof(vertex_data));
CombinedVertexDataOffset += MeshTotalVertices;
uint32_t MeshTotalIndices = Mesh->NextIndice;
memcpy_s(CombinedIndices + CombinedIndicesOffset, TotalIndices * sizeof(uint32_t),
Mesh->Indices, MeshTotalIndices * sizeof(uint32_t));
CombinedIndicesOffset += MeshTotalIndices;
}
Model->CombinedVertexData = CombinedVertexData;
Model->CombinedIndicesData = CombinedIndices;
Model->CombinedVertexDataSize = TotalVertices;
Model->CombinedIndicesDataSize = TotalIndices;
Model->DataStatus = model_data_status::PENDING_UPLOAD;
Model->NextMeshToLoad stores the next empty array location in which it can store a mesh. It also represents the current count of stored meshes - not to be confused with the maximum size alloc'd for the array.
Mesh->NextVertexData and Mesh->NextIndice work the same, but for each mesh in part.
Uploading the data to the GPU:
if (Model->DataStatus == model_data_status::PENDING_UPLOAD)
{
glGenVertexArrays(1, &Model->VertexArrayObject);
glGenBuffers(1, &Model->VertexBufferObject);
glGenBuffers(1, &Model->ElementBufferObject);
glBindVertexArray(Model->VertexArrayObject);
glBindBuffer(GL_ARRAY_BUFFER, Model->VertexBufferObject);
glBufferData(GL_ARRAY_BUFFER, Model->CombinedVertexDataSize * sizeof(vertex_data), Model->CombinedVertexData, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, Model->ElementBufferObject);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, Model->CombinedIndicesDataSize * sizeof(uint32_t), &Model->CombinedIndicesData[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_data), 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_data), (void *)(sizeof(glm::vec3)));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(vertex_data), (void *)(2 * sizeof(glm::vec3)));
glBindVertexArray(0);
}
The problem is that each mesh has a different/separate material. This forces me to issue separate draw calls for each mesh (need to set different uniform properties + bind different textures). I know the vertex and indices size for each mesh in part, so I figured I could just call glDrawElements and use the 4th parameter as an offset into the ELEMENT_ARRAY_BUFFER:
glBindVertexArray(Model->VertexArrayObject);
uint32_t IndicesOffset = 0;
uint32_t MeshesSize = Model->NextMeshToLoad;
for (uint32_t MeshIndex = 0;
MeshIndex < MeshesSize;
++MeshIndex)
{
mesh *Mesh = Model->Meshes + MeshIndex;
uint32_t IndicesDataSize = Mesh->NextIndice;
glDrawElements(GL_TRIANGLES, IndicesDataSize, GL_UNSIGNED_INT, (void *)(IndicesOffset * sizeof(uint32_t)));
IndicesOffset += Mesh->NextIndice;
}
glBindVertexArray(0);
Unfortunately this doesn't seem to work... My data is only partially rendered and I can't understand why this is happening. I've checked the data loaded from the .obj file and it's fine. I've also checked the combined vertex data & the combined indices data... it's all there. But my meshes aren't all there on the screen :( (I've enabled wireframe mode so I can better visualize the issue)
If I try to call glDrawArrays on the combined vertex data, it's all working fine and dandy:
At this moment, both the combined vertex and indices arrays register a size of 57174 elements each. Is it not possible for OpenGL to properly map the indices offset to the vertices data internally and properly render? If I use one VBO + EBO per mesh, everything renders fine, so I know the data I'm receiving is not corrupted (further evidenced by glDrawArrays on the combined vertex data). I've been reading and trying all sorts of solutions without anything working... Pretty much at my wits end. Would really appreciate some help! Thanks!

Using multiple VBO in a VAO

I try to use 2 VBO inside a VAO and I end up with a crash (far beyond my app).
The idea is to make a first VBO (and optionally an IBO) to structure the geometry.
This worked well, until I got the idea to add a second VBO for the model matrix as a vertex attribute instead of a uniform.
So, when I declare my mesh I do as follow (reduced code) :
GLuint vao = 0;
glCreateVertexArrays(1, &vao);
glBindVertexArray(vao);
GLuint vbo = 0;
glCreateBuffers(1, &vbo);
glNamedBufferStorage(vbo, ...); // Fill the right data ...
for ( ... my attributes ) // Position, normal, texcoords ...
{
glVertexArrayAttribFormat(vao, attribIndex, size, GL_FLOAT, GL_FALSE, relativeOffset);
glVertexArrayAttribBinding(vao, attribIndex, bindingIndex);
glEnableVertexArrayAttrib(vao, attribIndex);
} -> this gives me the "stride" parameter for below.
glVertexArrayVertexBuffer(vao, 0/*bindingindex*/, vbo, 0, stride/*Size of one element in vbo in bytes*/);
GLuint ibo = 0;
glCreateBuffers(1, &ibo);
glNamedBufferStorage(ibo, ...); // Fill the right data ...
glVertexArrayElementBuffer(vao, ibo);
Until there, everything is fine, all I have to do is to call glBindVertexArray() and a glDrawXXX() command, I have something perfect on screen.
So, I decided to remove the modelMatrix uniform from the shader to use a mat4 attribute,
I could have chosen a UBO instead, but I want to extend the idea to instanced rendering by providing several matrices.
So, I tested with one model matrix in a VBO and just before the rendering, I do as follow (the VBO is built the same way before, I just put 16 floats for an identity matrix) :
glBindVertexArray(theObjectVAOBuiltBefore);
const auto bindingIndex = static_cast< GLuint >(1); // Here next binding point for the VBO, I guess...
const auto stride = static_cast< GLsizei >(16 * sizeof(GLfloat)); // The stride is the size in bytes of a matrix
glVertexArrayVertexBuffer(theObjectVAOBuiltBefore, bindingIndex, m_vertexBufferObject.identifier(), 0, stride); // I add the new VBO to the current VAO which already has a VBO (binding index 0) and an IBO
// Then I describe my new VBO as a matrix of 4 vec4.
const auto size = static_cast< GLint >(4);
for ( auto columnIndex = 0U; columnIndex < 4U; columnIndex++ )
{
const auto attribIndex = static_cast< unsigned int >(VertexAttributes::Type::ModelMatrix) + columnIndex;
glVertexArrayAttribFormat(theObjectVAOBuiltBefore, attribIndex, size, GL_FLOAT, GL_FALSE, 0);
glVertexArrayAttribBinding(theObjectVAOBuiltBefore, attribIndex, bindingIndex);
glEnableVertexArrayAttrib(theObjectVAOBuiltBefore, attribIndex);
glVertexAttribDivisor(attribIndex, 1); // Here I want this attribute per instance.
}
glDrawElementsInstanced(GL_TRIANGLES, count, GL_UNSIGNED_INT, nullptr, 1);
And the result is a beautiful crash, I don't have any clue because the crash occurs within the driver where I can't have a debug output.
Is my idea complete garbage? Or is there something I missed?
I found the error glVertexAttribDivisor() is part of the old ways (like glVertexAttribPointer(), ...), I switched to glVertexBindingDivisor()/glVertexArrayBindingDivisor() and now there is no crash at all.
Answers were there : https://www.khronos.org/opengl/wiki/Vertex_Specification#Separate_attribute_format

OpenGL will only update the last vertex array object

I am writing some code that generates some VAO and then when the physics have been updated a call is made to update the vertices inside the VAOs and then a call is made to redraw these objects.
The problem with my code is that only the last VAO is being updated by UpdateScene. The following two functions create the buffers.
// Creates one VAO per physics object, then builds vertex/index buffers for
// each sphere object via BuildSphereBuffer.
// NOTE(review): VAO is a raw owning array (new[]) with no visible delete[];
// calling BuildBuffers twice leaks the previous array — verify ownership.
void BuildBuffers(std::vector<demolish::Object>& objects)
{
VAO = new UINT[objects.size()]; // one VAO name per object
glGenVertexArrays(objects.size(),VAO);
int counter = 0; // index into VAO[] for the next sphere built
for(auto& o:objects)
{
if(o.getIsSphere())
{
BuildSphereBuffer(o.getRad(),o.getLocation(),counter);
counter++;
}
else
{
// non-sphere objects are currently not handled
}
}
}
// Generates a UV sphere mesh and uploads it into VAO[counter]:
// BUFFERS[0] receives the interleaved vertex data (11 floats per vertex,
// positions first, normals at float offset 3), BUFFERS[1] the UINT indices.
// NOTE(review): BUFFERS looks like a shared/global 2-element array that is
// overwritten by glGenBuffers on every call, so the buffer ids of earlier
// spheres are lost to any later code that only consults BUFFERS — consistent
// with the "only the last VAO updates" symptom described below. Confirm.
void BuildSphereBuffer(float radius,std::array<iREAL,3> position,int counter)
{
GeometryGenerator::MeshData meshObj;
// NOTE(review): this push_back copies meshObj BEFORE CreateSphere fills it,
// so geoGenObjects stores an empty mesh until UpdateScene regenerates it.
geoGenObjects.push_back(meshObj);
geoGen.CreateSphere(radius,30,30,meshObj,position);
VAOIndexCounts.push_back(meshObj.Indices.size());
glGenBuffers(2,BUFFERS); // fresh VBO/EBO ids; clobbers previous BUFFERS contents
glBindVertexArray(VAO[counter]);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_NORMAL_ARRAY);
glBindBuffer(GL_ARRAY_BUFFER,BUFFERS[0]);
glBufferData(GL_ARRAY_BUFFER,
meshObj.Vertices.size()*sizeof(GLfloat)*11,
&meshObj.Vertices.front(), GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, BUFFERS[1]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
meshObj.Indices.size() * sizeof(UINT),
&meshObj.Indices.front(), GL_STATIC_DRAW);
glVertexPointer(3, GL_FLOAT,sizeof(GLfloat)*11, 0); // positions: floats 0..2 of each 11-float vertex
glNormalPointer(GL_FLOAT,sizeof(GLfloat)*11,(GLvoid*)(3*sizeof(GLfloat))); // normals: floats 3..5
}
Then the following function updates the buffers when it is called.
// Rebuilds each sphere's mesh at its current physics location, re-uploads
// the vertex data, and triggers a redraw.
// BUG(review): binding VAO[i] does NOT bind that object's array buffer.
// glBufferSubData writes to whatever VBO is currently bound to
// GL_ARRAY_BUFFER — here the one left bound by the last BuildSphereBuffer
// call — so every iteration overwrites the same buffer. A
// glBindBuffer(GL_ARRAY_BUFFER, <VBO of object i>) is required before the
// glBufferSubData call, as the answer below explains.
void UpdateScene(float dt, std::vector<demolish::Object>& objects)
{
// spherical-coordinate camera position from the radius/phi/theta globals
float x = radius*sinf(phi)*cosf(theta);
float z = radius*sinf(phi)*sinf(theta);
float y = radius*cosf(phi);
AV4FLOAT position(x,y,z,1.0);
AV4FLOAT target(0.0,0.0,0.0,0.0);
AV4FLOAT up(0.0,1.0,0.0,0.0);
viewModelMatrix = formViewModelMatrix(position,target,up);
for(int i=0;i<objects.size();i++)
{
// regenerate sphere i in place at its new location
geoGen.CreateSphere(objects[i].getRad(),
30,
30,
geoGenObjects[i],
objects[i].getLocation());
VAOIndexCounts[i] = geoGenObjects[i].Indices.size();
glBindVertexArray(VAO[i]); // binds attribute state only, not GL_ARRAY_BUFFER
glBufferSubData(GL_ARRAY_BUFFER,
0,
geoGenObjects[i].Vertices.size()*sizeof(GLfloat)*11,
&geoGenObjects[i].Vertices.front());
}
RedrawTheWindow();
}
The problem with this code is that it is not updating all of the buffers, only the "last" one. For instance if objects has size 3 then even if the locations of all three objects change only the last buffer is being updated with the new vertices.
I have narrowed it down to OpenGL but I am not sure what I am doing wrong.
Binding the Vertex Array Object doesn't bind any array buffer object.
If you want to change the content of an array buffer, then you have to bind the array buffer:
GLuint VBO = .....; // VBO which corresponds to VAO[i]
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferSubData(
GL_ARRAY_BUFFER, 0,
geoGenObjects[i].Vertices.size()*sizeof(GLfloat)*11,
&geoGenObjects[i].Vertices.front());
Note, a vertex array object may refer to a different array buffer object, for each attribute. So which one should be bound?
Since OpenGL 4.5 you can do this by the direct state access version too.
See glNamedBufferSubData:
glNamedBufferSubData (
VBO, 0,
geoGenObjects[i].Vertices.size()*sizeof(GLfloat)*11,
&geoGenObjects[i].Vertices.front());
If the vertex array object is bound, then a named array buffer object which is bound to a binding point can be queried by glGetVertexAttribIuiv using the parameter GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING:
e.g.:
glBindVertexArray(VAO[i]);
GLuint VBO;
glGetVertexAttribIuiv(0, GL_VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, &VBO);

transform feedback empty buffer

EDIT: I simply forgot to bind the shader. Now it's working.
I am currently trying to get transform feedback to run but it doesn't. I am using OpenGL 3.3 and followed the steps from this tutorial converting it to Java and LWJGL.
Shader code:
#version 330 core
in float inValue;
out float outValue;
void main(){
outValue = sqrt(inValue);
}
Shader class code:
... loading shaders, uniforms, attribute locations etc. ...
public void setTransformFeedbackVaryings(String[] varyings, boolean interleaved){
int bufferMode = interleaved ? GL_INTERLEAVED_ATTRIBS : GL_SEPARATE_ATTRIBS;
glTransformFeedbackVaryings(programID, varyings, bufferMode);
}
public void compile(){
glLinkProgram(programID);
glValidateProgram(programID);
... //error catching
}
Other:
...
//sets the list of feedback varyings names with GL_INTERLEAVED_ATTRIBS
shader.setTransformFeedbackVaryings(new String[]{"outValue"}, true);
//linking and validating shader
shader.compile();
attribLocation = shader.getAttribLocation("inValue");
//create VAO
VAO = glGenVertexArrays();
glBindVertexArray(VAO);
//create data buffer
FloatBuffer buffer = ... //contains the values to be send to the shader
//create VBO
VBO = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, buffer, GL_STATIC_DRAW);
glEnableVertexAttribArray(attribLocation);
glVertexAttribPointer(attribLocation, 1, GL_FLOAT, false, 0, 0);
//create test buffer
FloatBuffer test = ... //some values
//create transform feedback buffer
TBO = glGenBuffers();
glBindBuffer(GL_ARRAY_BUFFER, TBO);
glBufferData(GL_ARRAY_BUFFER, test, GL_STATIC_READ);
//perform feedback
glEnable(GL_RASTERIZER_DISCARD);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, TBO);
glBeginTransformFeedback(GL_POINTS);
glDrawArrays(GL_POINTS, 0, NUM_VALUES);
glEndTransformFeedback();
glDisable(GL_RASTERIZER_DISCARD);
glFlush();
//read data
FloatBuffer feedback = ... //empty buffer
glGetBufferSubData(GL_TRANSFORM_FEEDBACK_BUFFER, 0, feedback);
The values I get from the feedback buffer are those from the test buffer. Hence writing and reading the buffers seems to work. When leaving out the test buffer and loading an empty one to the TBO the results are all 0. I tried replacing the GL_ARRAY_BUFFER with GL_TRANSFORM_FEEDBACK_BUFFER when using the TBO as I read here but it didn't work either.
I don't get any OpenGL Errors btw.

openGL glUnmapBuffer() returning unknown error code 1280

I am writing some proof-of-concept code. I want to prove that I can write data to a buffer object after the buffer has been created. However, I am getting a GLenum error code of 1280 when I try to unmap the buffer after writing to it. I am completely stymied.
I can initialize the buffer the with color data and successfully render it. The problem is that I cannot modify the data in the buffer afterwards. The code is below. It shows how I write the new data to the buffer and then how I try to read it back. The error codes are shown in comments after the glGetError() calls. The variable "cbo" is the color buffer:
//NEW COLOR DATA
GLubyte colorData2[9] = {255,255,0, 0,128,255, 255,0,255};
//WRITE THE DATA TO THE COLOR BUFFER OBJECT (variable cbo)
glBindBuffer(GL_ARRAY_BUFFER, cbo);
int err1 = glGetError(); //Error code 0
//Oddly, glMapBuffer always returns an invalid pointer.
//(glMapBuffer takes an access enum such as GL_WRITE_ONLY, not the GL_MAP_WRITE_BIT flag used below.)
//GLvoid * pColor = glMapBuffer(GL_ARRAY_BUFFER, GL_MAP_WRITE_BIT);
//However, glMapBufferRange return a pointer that looks good
GLvoid * pColor = glMapBufferRange(GL_ARRAY_BUFFER, 0, 9, GL_MAP_WRITE_BIT);
int err2 = glGetError(); //Error code 0
// Copy colors from host to device
memcpy(pColor, colorData2, 9);
//Unmap to force host to device copy
glUnmapBuffer(cbo);
int err3 = glGetError(); //Error code 1280
//Unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
int err4 = glGetError(); //Error code 0
//******TEST THE WRITE******
GLubyte readbackData[9];
glBindBuffer(GL_ARRAY_BUFFER, cbo);
int err5 = glGetError(); //Error code 0
GLvoid * pColorX = glMapBufferRange(GL_ARRAY_BUFFER, 0, 9, GL_MAP_READ_BIT);
int err6 = glGetError(); //Error code 1282
//Mem copy halts because of a memory exception.
memcpy(readbackData, pColorX, 9);
glUnmapBuffer(cbo);
glBindBuffer(GL_ARRAY_BUFFER, 0);
Here is the code where I created the buffer object:
//Create color buffer
glGenBuffers(1, &cbo);
glBindBuffer(GL_ARRAY_BUFFER, cbo);
//Create space for three RGB 8-bit color objects
colorBufferSize = 3 * numColorChannels * sizeof(GLubyte);
glBufferData(GL_ARRAY_BUFFER, colorBufferSize, colorData, GL_DYNAMIC_DRAW);
//Unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
1280, or 0x0500, is GL_INVALID_ENUM.
glUnmapBuffer takes the enum where the buffer object is bound, not the buffer object to unmap. glUnmapBuffer expects the buffer object to be unmapped to be bound to that binding target. So glUnmapBuffer(GL_ARRAY_BUFFER) will unmap whatever is currently bound to the GL_ARRAY_BUFFER binding.