glDrawTransformFeedback does not write to buffer - OpenGL

I am trying to use transform feedback to create a particle system, so there are 2 transform feedback buffers and 2 array buffers, switched in a ping-pong fashion.
My shader is simple; it just keeps adding to the source position:
gs_Position = vs_Position[0] + vec3(1, 0, 1);
If I use glDrawTransformFeedback(), both buffers keep their original data and never change. But if I switch to glDrawArrays(), the ping-pong works and the position values keep increasing.
Initial code
shader.setupShaderFromFile(GL_VERTEX_SHADER, "feedback.vert");
shader.setupShaderFromFile(GL_GEOMETRY_SHADER, "feedback.geom");
const GLchar* feedbackVaryings[] = { "gs_Position", "gs_Velocity" };
glTransformFeedbackVaryings(shader.getProgram(), 2, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
shader.linkProgram();
glGenTransformFeedbacks(2, tfo);
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
Particle* p = new Particle[2];
p[0].position = glm::vec3(1, 2, 3);
p[1].position = glm::vec3(2, 3, 4);
glGenBuffers(2, bfo);
glBindBuffer(GL_ARRAY_BUFFER, bfo[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Particle)*2, p, GL_STATIC_DRAW);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, tfo[0]);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, bfo[0]);
glBindBuffer(GL_ARRAY_BUFFER, bfo[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Particle)*2, nullptr, GL_STATIC_READ);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, tfo[1]);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, bfo[1]);
Perform transform
int from = pid;
int to = (pid + 1) % 2;
glEnable(GL_RASTERIZER_DISCARD);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, tfo[to]);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, bfo[to]);
shader.begin();
glBindBuffer(GL_ARRAY_BUFFER, bfo[from]);
{
GLint inputAttrib = shader.getAttributeLocation("a_Position");
glEnableVertexAttribArray(inputAttrib);
glVertexAttribPointer(inputAttrib, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)offsetof(Particle, position));
}
{
GLint inputAttrib = shader.getAttributeLocation("a_Velocity");
glEnableVertexAttribArray(inputAttrib);
glVertexAttribPointer(inputAttrib, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (void*)offsetof(Particle, velocity));
}
glBeginTransformFeedback(GL_POINTS);
//glDrawArrays(GL_POINTS, 0, 2);
glDrawTransformFeedback(GL_POINTS, tfo[from]);
glEndTransformFeedback();
shader.end();
glDisable(GL_RASTERIZER_DISCARD);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
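For completeness, after the draw call the ping-pong index is advanced so that the next frame reads from the buffer that was just written; a one-line sketch, assuming pid is the member that tracks the current source buffer (as in the code above):
pid = to; // next frame: read from bfo[to]/tfo[to], capture into the other pair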

Related

When I implement indices into a mesh/face, why does it return an OpenGL error?

So I have a program that generates chunk meshes based on given vertex data. I already have it working without indices, but upon trying to include them in my program, it returns OpenGL error 1285.
I have the array buffer and VAO calls in different functions, but they look like this:
void Chunk::_loadArrayBuffers()
{
glGenBuffers(1, &_trianglesID);
glGenBuffers(1, &_uvsID);
glGenBuffers(1, &_normalsID);
glGenBuffers(1, &_IndiceID);
glBindBuffer(GL_ARRAY_BUFFER, _trianglesID);
glBufferData(GL_ARRAY_BUFFER,
_triangles.size() * 3 * sizeof(GLfloat),
_triangles.data(),
GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, _uvsID);
glBufferData(GL_ARRAY_BUFFER,
_uvs.size() * 2 * sizeof(GLfloat),
_uvs.data(),
GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, _normalsID);
glBufferData(GL_ARRAY_BUFFER,
_normals.size() * 3 * sizeof(GLfloat),
_normals.data(),
GL_STATIC_DRAW);
if (_indices.size() * 3 > 0)
{
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _IndiceID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
_indices.size() * 3 * sizeof(GLfloat),
_indices.data(),
GL_STATIC_DRAW);
}
}
void Chunk::_makeVAO()
{
glGenVertexArrays(1, &_VAO);
glBindVertexArray(_VAO);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _trianglesID);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, _uvsID);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, _normalsID);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
For each mesh, I store the vertex data of each triangle into multiple glm::vec3s, essentially a vector of size 3. My rectangle/face creation function looks like this:
void Chunk::_addRectangle(glm::vec3 center, glm::vec3 height, glm::vec3 width, unsigned tex_num)
{
if (glm::length(height) == 0 || glm::length(width) == 0)
throw std::runtime_error("width or height should not be 0");
glm::vec3 corner1 = center - (height / 2.0) - (width / 2.0);
glm::vec3 corner2 = center - (height / 2.0) + (width / 2.0);
glm::vec3 corner3 = center + (height / 2.0) + (width / 2.0);
glm::vec3 corner4 = center + (height / 2.0) - (width / 2.0);
glm::vec3 normal = glm::cross(height, width);
glm::vec2 uv1;
glm::vec2 uv2;
glm::vec2 uv3;
glm::vec2 uv4;
if (fabs(normal[1]) == 1.0)
{
uv1 = glm::vec2(1.0 / _tex_atlas_width, 1);
uv2 = glm::vec2(1.0 / _tex_atlas_width, 0);
uv3 = glm::vec2(0, 0);
uv4 = glm::vec2(0, 1);
}
else
{
uv1 = glm::vec2(1.0 / _tex_atlas_width, height[1]);
uv2 = glm::vec2(1.0 / _tex_atlas_width, 0);
uv3 = glm::vec2(0, 0);
uv4 = glm::vec2(0, height[1]);
}
float add = (1.0 / double(_tex_atlas_width)) * tex_num;
uv1.x += add;
uv2.x += add;
uv3.x += add;
uv4.x += add;
// triangle 1
_triangles.push_back(corner3);
_triangles.push_back(corner2);
_triangles.push_back(corner1);
_normals.push_back(normal);
_normals.push_back(normal);
_normals.push_back(normal);
_uvs.push_back(uv1);
_uvs.push_back(uv2);
_uvs.push_back(uv3);
_indices.push_back(glm::vec3(nrOfIndices + 0, nrOfIndices + 1, nrOfIndices + 2));
// triangle 2
//_triangles.push_back(corner1);
_triangles.push_back(corner4);
//_triangles.push_back(corner3);
_normals.push_back(normal);
_normals.push_back(normal);
_normals.push_back(normal);
_uvs.push_back(uv3);
_uvs.push_back(uv4);
_uvs.push_back(uv1);
_indices.push_back(glm::vec3(nrOfIndices + 2, nrOfIndices + 3, nrOfIndices + 0));
nrOfIndices += 4;
}
Everything worked perfectly until I tried to add indices. What's wrong with it? I double-checked the order of the indices and they seem to be correct, so I'm guessing it's an error with how I'm loading the indices, but I just can't figure it out from what I could find on learnopengl and other OpenGL documentation. Thanks!
The GL_ELEMENT_ARRAY_BUFFER binding is stored in the Vertex Array Object. See Index buffers.
The instruction
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _IndiceID);
associates _IndiceID with the currently bound Vertex Array Object.
You have to bind the Vertex Array Object before you specify the element array buffer:
glGenVertexArrays(1, &_VAO);
glBindVertexArray(_VAO);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, _IndiceID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER,
_indices.size() * 3 * sizeof(GLuint),
_indices.data(),
GL_STATIC_DRAW);
Note that the GL_ARRAY_BUFFER binding is also stored in the state vector of the VAO, but this happens when glVertexAttribPointer is called.
When glVertexAttribPointer is called, the buffer that is currently bound to the GL_ARRAY_BUFFER target is associated with the vertex attribute of the specified index.
The behavior of GL_ELEMENT_ARRAY_BUFFER and GL_ARRAY_BUFFER differs because a VAO can refer to only one index (element) buffer, but it can refer to multiple array buffers: each attribute (index) can be associated with a different buffer.
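To illustrate, the array-buffer association is captured per attribute at the moment glVertexAttribPointer is called; a minimal sketch using the buffer names from the question (normals omitted for brevity):
glBindVertexArray(_VAO);
glBindBuffer(GL_ARRAY_BUFFER, _trianglesID); // bound now ...
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // ... so attribute 0 sources from _trianglesID
glBindBuffer(GL_ARRAY_BUFFER, _uvsID); // rebinding does not disturb attribute 0
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, 0); // attribute 1 sources from _uvsID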
Furthermore, the data type of the indices has to be integral. Possible data types are unsigned char, unsigned short, and unsigned int, which correspond to the OpenGL enumerator constants GL_UNSIGNED_BYTE, GL_UNSIGNED_SHORT, and GL_UNSIGNED_INT. See glDrawElements.
Change the index vector and use glm::ivec3 rather than glm::vec3:
std::vector<glm::ivec3> _indices;
_indices.push_back(glm::ivec3(nrOfIndices + 2, nrOfIndices + 3, nrOfIndices + 0));
glDrawElements(GL_TRIANGLES, _indices.size()*3, GL_UNSIGNED_INT, nullptr);

GLSL Instancing - Passing ModelMatrix

I have done many searches on Google/Stack Overflow and can't find an answer to my specific issue. I tried to implement instancing using a patched sphere mesh and the following references:
http://ogldev.atspace.co.uk/www/tutorial33/tutorial33.html
https://learnopengl.com/Advanced-OpenGL/Instancing
I have tried to debug my own code but nothing seems to be working... I am consistently getting output that looks like the model matrix values are corrupt or set up incorrectly. Here is what it looks like at different camera rotations when passing in an identity matrix for the model:
(screenshots: Instance Model Matrix #1, Instance Model Matrix #2)
If I hard-code the model matrix in the vertex shader to be the Identity Matrix then it comes out just fine:
(screenshot: Hard-coded Model Matrix)
So it seems to me that the model matrix is not being passed in correctly, and I've spent days reviewing my code and trying different things. I'm hoping someone can review this and spot the problem. Thanks!
InstanceObject::Init()
for (unsigned int Level = 0; Level < Levels; Level++)
{
FrameTicks[Level] = 0;
VBO_SeedVector[Level] = 0;
VBO_ModelMatrix[Level] = 0;
//. Bind VAO for Object's Level of Detail
glBindVertexArray(Meshes->GetMesh(Level)->GetVAO());
//. Generate Buffers for Object's SeedVector
glGenBuffers(1, &VBO_SeedVector[Level]);
//. Generate Instance Array for Object's SeedVector
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Vector4f), (void*)0);
glVertexAttribDivisor(2, 1);
//. Generate Buffers for Object's ModelMatrix
glGenBuffers(1, &VBO_ModelMatrix[Level]);
//. Generate Instance Array for Object's ModelMatrix
glEnableVertexAttribArray(3);
glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(0 * sizeof(Vector4f)));
glVertexAttribDivisor(3, 1);
glEnableVertexAttribArray(4);
glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(1 * sizeof(Vector4f)));
glVertexAttribDivisor(4, 1);
glEnableVertexAttribArray(5);
glVertexAttribPointer(5, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(2 * sizeof(Vector4f)));
glVertexAttribDivisor(5, 1);
glEnableVertexAttribArray(6);
glVertexAttribPointer(6, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(3 * sizeof(Vector4f)));
glVertexAttribDivisor(6, 1);
ObjectSeeds.push_back(std::vector<Vector4f>());
ObjectModels.push_back(std::vector<Matrix4f>());
}
InstanceObject::Update()
//. Load Instance Arrays into VAOs
for (unsigned int Level = 0; Level < Levels; Level++)
{
glBindVertexArray(Meshes->GetMesh(Level)->GetVAO());
glBindBuffer(GL_ARRAY_BUFFER, VBO_SeedVector[Level]);
glBufferData(GL_ARRAY_BUFFER, ObjectSeeds[Level].size() * sizeof(Vector4f), &ObjectSeeds[Level], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, VBO_ModelMatrix[Level]);
glBufferData(GL_ARRAY_BUFFER, ObjectModels[Level].size() * sizeof(Matrix4f), &ObjectModels[Level], GL_STATIC_DRAW);
}
InstanceObject::Render()
//. Empty Detailed Mesh
if (Meshes == nullptr)
return;
//. Enable VAA for Px, Py, Pz, U
glEnableVertexAttribArray(0);
glVertexAttribDivisor(0, 0);
//. Enable VAA for Nx, Ny, Nz, V
glEnableVertexAttribArray(1);
glVertexAttribDivisor(1, 0);
//. Enable VAA for Seed Vector
glEnableVertexAttribArray(2);
glVertexAttribDivisor(2, 0);
//. Enable VAA for Model Matrix
glEnableVertexAttribArray(3);
glVertexAttribDivisor(3, 1);
glEnableVertexAttribArray(4);
glVertexAttribDivisor(4, 1);
glEnableVertexAttribArray(5);
glVertexAttribDivisor(5, 1);
glEnableVertexAttribArray(6);
glVertexAttribDivisor(6, 1);
//. Render Instances For Each Ring
for (unsigned int Level = Levels - 1; Level > 0; Level--)
{
unsigned int RingSize = ObjectModels[Level].size();
//. Empty Ring
if (RingSize == 0)
continue;
BaseMesh* Mesh = Meshes->GetMesh(Level);
//. Empty Mesh
if (Mesh == nullptr)
continue;
unsigned int VAO = Mesh->GetVAO();
if (VAO == 0)
continue;
//. Render Instances
glBindVertexArray(VAO);
glDrawElementsInstanced(GL_TRIANGLES, Mesh->Elements.size(), GL_UNSIGNED_INT, 0, RingSize);
}
glDisableVertexAttribArray(6);
glDisableVertexAttribArray(5);
glDisableVertexAttribArray(4);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(1);
Vertex Shader
#version 440
layout (location = 0) in vec4 Position;
layout (location = 1) in vec4 Normal;
layout (location = 2) in vec4 Seed;
layout (location = 3) in vec4 ModelCol0;
layout (location = 4) in vec4 ModelCol1;
layout (location = 5) in vec4 ModelCol2;
layout (location = 6) in vec4 ModelCol3;
//uniform mat4 Model;
uniform mat4 View;
uniform mat4 Projection;
out vec3 mPosition;
out vec3 vPosition;
out vec3 vNormal;
out vec2 vSample;
void main()
{
mPosition = Position.xyz;
mat4 vModel = mat4(1.0);
vModel[0] = ModelCol0;
vModel[1] = ModelCol1;
vModel[2] = ModelCol2;
vModel[3] = ModelCol3;
vPosition = (vModel * vec4(mPosition.xyz, 1.0)).xyz;
vNormal = (vModel * vec4(Normal.xyz, 0.0)).xyz;
vSample = vec2(Position.w, Normal.w);
gl_Position = Projection * View * vModel * vec4(mPosition.xyz, 1.0);
}
04/09/18 UPDATE:
I updated the InstanceObject::Init() to bind the VBO before the vertex attribute array calls, as follows:
for (unsigned int Level = 0; Level < Levels; Level++)
{
FrameTicks[Level] = 0;
VBO_SeedVector[Level] = 0;
VBO_ModelMatrix[Level] = 0;
//. Bind VAO for Object's Level of Detail
glBindVertexArray(Meshes->GetMesh(Level)->GetVAO());
if (!LogErrorGL(std::cout, "glGenBuffers"))
return false;
//. Generate Buffers for Object's SeedVector
glGenBuffers(1, &VBO_SeedVector[Level]);
if (!LogErrorGL(std::cout, "glGenBuffers"))
return false;
//. Enable Vertex Attribute Arrays
glEnableVertexAttribArray(2);
if (!LogErrorGL(std::cout, "glEnableVertexAttribArray"))
return false;
//. Bind Buffer to Setup Instance Array
glBindBuffer(GL_ARRAY_BUFFER, VBO_SeedVector[Level]);
if (!LogErrorGL(std::cout, "glBindBuffer"))
return false;
//. Generate Instance Array for Object's SeedVector
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Vector4f), (void*)0);
if (!LogErrorGL(std::cout, "glVertexAttribPointer"))
return false;
//. Apply Attribute Divisors
glVertexAttribDivisor(2, 1);
if (!LogErrorGL(std::cout, "glVertexAttribDivisor"))
return false;
//. Unbind Buffer
glBindBuffer(GL_ARRAY_BUFFER, 0);
//.
//.
//.
//. Generate Buffers for Object's ModelMatrix
glGenBuffers(1, &VBO_ModelMatrix[Level]);
if (!LogErrorGL(std::cout, "glGenBuffers"))
return false;
//. Enable Vertex Attribute Arrays
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glEnableVertexAttribArray(5);
glEnableVertexAttribArray(6);
if (!LogErrorGL(std::cout, "glEnableVertexAttribArray"))
return false;
//. Bind Buffer to Setup Instance Arrays
glBindBuffer(GL_ARRAY_BUFFER, VBO_ModelMatrix[Level]);
if (!LogErrorGL(std::cout, "glBindBuffer"))
return false;
//. Generate Instance Array for Object's ModelMatrix
glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(0 * sizeof(Vector4f)));
glVertexAttribPointer(4, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(1 * sizeof(Vector4f)));
glVertexAttribPointer(5, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(2 * sizeof(Vector4f)));
glVertexAttribPointer(6, 4, GL_FLOAT, GL_FALSE, sizeof(Matrix4f), (void*)(3 * sizeof(Vector4f)));
if (!LogErrorGL(std::cout, "glVertexAttribPointer"))
return false;
glVertexAttribDivisor(3, 1);
glVertexAttribDivisor(4, 1);
glVertexAttribDivisor(5, 1);
glVertexAttribDivisor(6, 1);
if (!LogErrorGL(std::cout, "glVertexAttribDivisor"))
return false;
//. Unbind Buffer
glBindBuffer(GL_ARRAY_BUFFER, 0);
//. Bind Default VAO ~ move to outside For Loop
glBindVertexArray(0);
if (!LogErrorGL(std::cout, "glBindVertexArray"))
return false;
ObjectSeeds.push_back(std::vector<Vector4f>());
ObjectModels.push_back(std::vector<Matrix4f>());
}
That all works without throwing any GL errors, but I do get a GL_INVALID_OPERATION in the InstanceObject::Update() section when I try to bind the VAO to load the updated data:
//. Load Instance Arrays into VAOs
for (unsigned int Level = 0; Level < Levels; Level++)
{
glBindVertexArray(Meshes->GetMesh(Level)->GetVAO()); //. GL_INVALID OPERATION
if (!LogErrorGL(std::cout, "glBindVertexArray"))
return;
//glGenBuffers(1, &VBO_SeedVector[Level]);
glBindBuffer(GL_ARRAY_BUFFER, VBO_SeedVector[Level]);
if (!LogErrorGL(std::cout, "glBindBuffer"))
return;
glBufferData(GL_ARRAY_BUFFER, ObjectSeeds[Level].size() * sizeof(Vector4f), &ObjectSeeds[Level], GL_STATIC_DRAW);
if (!LogErrorGL(std::cout, "glBufferData"))
return;
//glGenBuffers(1, &VBO_ModelMatrix[Level]);
glBindBuffer(GL_ARRAY_BUFFER, VBO_ModelMatrix[Level]);
if (!LogErrorGL(std::cout, "glBindBuffer"))
return;
glBufferData(GL_ARRAY_BUFFER, ObjectModels[Level].size() * sizeof(Matrix4f), &ObjectModels[Level], GL_STATIC_DRAW);
if (!LogErrorGL(std::cout, "glBufferData"))
return;
In InstanceObject::Init(), you never actually bind any of the VBOs you are creating.
GL's attribute array pointer always contains a reference to the buffer object name and the byte offset into that buffer (among other things such as data format and stride). glVertexAttribPointer stores the name of the buffer currently bound to GL_ARRAY_BUFFER in the attribute pointer.
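So each glVertexAttribPointer call in Init() needs the corresponding VBO bound at that moment. A minimal sketch for just the seed attribute, assuming the same member names as in the question:
glBindVertexArray(Meshes->GetMesh(Level)->GetVAO());
glGenBuffers(1, &VBO_SeedVector[Level]);
glBindBuffer(GL_ARRAY_BUFFER, VBO_SeedVector[Level]); // bound now, so the pointer call below records this buffer
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, sizeof(Vector4f), (void*)0);
glVertexAttribDivisor(2, 1);
glBindBuffer(GL_ARRAY_BUFFER, 0); // safe to unbind afterwards; the association is kept in the VAO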

glDrawArrays with cocos2d not drawing at all

I have a lot of points that I need to draw in a batch. I have been trying for two days and I can't seem to get any progress with glDrawArrays. I have tried DrawNode and drawing each individual point for testing, and that works correctly... but I can't seem to get glDrawArrays to give any visual result.
Here is my drawing code (I changed a few variable names):
auto glProgram = getGLProgram();
if (glProgram == nullptr) {
setGLProgramState(GLProgramState::getOrCreateWithGLProgramName(
GLProgram::SHADER_NAME_POSITION_COLOR));
glProgram = getGLProgram();
if (glProgram == nullptr) {
return;
}
}
glProgram->use();
glProgram->setUniformsForBuiltins();
GL::enableVertexAttribs(GL::VERTEX_ATTRIB_FLAG_POSITION | GL::VERTEX_ATTRIB_FLAG_COLOR);
GLfloat *vertices = new GLfloat[myStruct->data.size()*2];
GLfloat *colors = new GLfloat[myStruct->data.size()*4];
int vIndex = 0;
int cIndex = 0;
for (std::vector<myPointStruct*>::iterator it = myStruct->data.begin(); it != myStruct->data.end(); ++it) {
vertices[vIndex++] = (*it)->pos.x;
vertices[vIndex++] = (*it)->pos.y;
colors[cIndex++] = (*it)->color.r;
colors[cIndex++] = (*it)->color.g;
colors[cIndex++] = (*it)->color.b;
colors[cIndex++] = (*it)->color.a;
}
glLineWidth(10);
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_POSITION, 2, GL_FLOAT, GL_FALSE, sizeof(GLfloat), &vertices[0]);
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_COLOR, 4, GL_FLOAT, GL_FALSE, sizeof(GLfloat), &colors[0]);
glBlendFuncSeparate(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
glDrawArrays(GL_POINTS, 0, (GLsizei) myStruct->data.size());
CC_INCREMENT_GL_DRAWN_BATCHES_AND_VERTICES(1, (GLsizei) myStruct->data.size());
And here is how I call the method:
_renderTexture->begin();
myMethodForDrawing();
_renderTexture->end();
Director::getInstance()->getRenderer()->render();
I have also tried:
_renderTexture->begin();
_customCommand.init(_renderTexture->getGlobalZOrder());
_customCommand.func = CC_CALLBACK_0(MyClass:: myMethodForDrawing,this);
auto renderer = Director::getInstance()->getRenderer();
renderer->addCommand(&_customCommand);
_renderTexture->end();
The 5th parameter of glVertexAttribPointer specifies the byte offset between consecutive generic vertex attributes. If the stride is 0, the generic vertex attributes are understood to be tightly packed in the array.
Since your vertices and colors are tightly packed, you do not need to set the stride parameter. Note that sizeof(GLfloat) is wrong anyway; in your case it would be 2 * sizeof(GLfloat) for vertices and 4 * sizeof(GLfloat) for colors.
Change your code like this (focus on the 0 for the 5th parameter):
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_POSITION, 2, GL_FLOAT, GL_FALSE, 0, &vertices[0]);
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_COLOR, 4, GL_FLOAT, GL_FALSE, 0, &colors[0]);
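For comparison, a nonzero stride would only be needed if positions and colors were packed into one interleaved array. A small sketch with a hypothetical PointVertex struct (not part of the original code):
struct PointVertex { GLfloat x, y; GLfloat r, g, b, a; };
std::vector<PointVertex> points; // assumed filled with the point data
// stride is the size of one whole record; each pointer starts at its member
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_POSITION, 2, GL_FLOAT, GL_FALSE, sizeof(PointVertex), &points[0].x);
glVertexAttribPointer(GLProgram::VERTEX_ATTRIB_COLOR, 4, GL_FLOAT, GL_FALSE, sizeof(PointVertex), &points[0].r);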

OpenGL Partial Drawing

I am working with OpenGL and I am pretty close to where I want to be. I am using VBOs; however, for some reason my picture is only drawing about half of its vertices (GL_LINE_STRIP). If I change the line:
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)0 );
to
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex)*2, (GLvoid*)0 );
I get the full picture. The parameter that I am changing is the 'stride'. Does anyone know why it is having this effect? If I load a file with more vertices I must increase my stride again to display all vertices. If I change the parameter to anything that is not a multiple of 32 (sizeof(Vertex)), it makes a nonsense picture. Also, if I increase it too much the drawing becomes jagged and it skips vertices.
I am sure I am passing something incorrectly, I just don't know where. (I am not drawing cubes, by the way; I am just working off of an example.) Here is my code:
CreateCube Function:
void CreateCube()
{
string line;
ifstream myfile("C:/Users/Andrew/Documents/Visual Studio 2013/Projects/Control/Control/bin/Debug/TempGeo.test");
if (myfile.is_open())
{
Vertex temp;
int count = 0;
while (getline(myfile, line))
{
if (count == 0)
{
temp.Position[0] = (float)atof(line.c_str());
count++;
}
else if (count == 1)
{
temp.Position[1] = (float)atof(line.c_str());
count++;
}
else if (count == 2)
{
temp.Position[2] = (float)atof(line.c_str());
temp.Position[3] = 1;
temp.Color[0] = 1.0;
temp.Color[1] = 0.0;
temp.Color[2] = 0.0;
temp.Color[3] = 1.0;
verts.push_back(temp);
count = 0;
}
}
cout << verts.size() << endl;
myfile.close();
}
//getMinMax(vertices);
//getDiameter();
ind.push_back(0);
for (int i = 1; i < verts.size()-1; i++)
{
if (i % 2 == 0)
ind.push_back( (GLuint)i / 2);
else
ind.push_back( (GLuint)(i / 2) + 1);
}
ShaderIds[0] = glCreateProgram();
ExitOnGLError("ERROR: Could not create the shader program");
{
//ShaderIds[1] = LoadShader("./OpenGL 3.3/SimpleShader.fragment.3.3.glsl", GL_FRAGMENT_SHADER);
//ShaderIds[2] = LoadShader("./OpenGL 3.3/SimpleShader.vertex.3.3.glsl", GL_VERTEX_SHADER);
ShaderIds[1] = LoadShader("C:/Users/Andrew/Documents/SimpleShader.fragment.3.3.glsl", GL_FRAGMENT_SHADER);
ShaderIds[2] = LoadShader("C:/Users/Andrew/Documents/SimpleShader.vertex.3.3.glsl", GL_VERTEX_SHADER);
glAttachShader(ShaderIds[0], ShaderIds[1]);
glAttachShader(ShaderIds[0], ShaderIds[2]);
}
glLinkProgram(ShaderIds[0]);
ExitOnGLError("ERROR: Could not link the shader program");
ModelMatrixUniformLocation = glGetUniformLocation(ShaderIds[0], "ModelMatrix");
ViewMatrixUniformLocation = glGetUniformLocation(ShaderIds[0], "ViewMatrix");
ProjectionMatrixUniformLocation = glGetUniformLocation(ShaderIds[0], "ProjectionMatrix");
ExitOnGLError("ERROR: Could not get shader uniform locations");
glGenVertexArrays(1, &BufferIds[0]);
ExitOnGLError("ERROR: Could not generate the VAO");
glBindVertexArray(BufferIds[0]);
ExitOnGLError("ERROR: Could not bind the VAO");
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
ExitOnGLError("ERROR: Could not enable vertex attributes");
glGenBuffers(2, &BufferIds[1]);
ExitOnGLError("ERROR: Could not generate the buffer objects");
glBindBuffer(GL_ARRAY_BUFFER, BufferIds[1]);
//glBufferData(GL_ARRAY_BUFFER, sizeof(VERTICES), VERTICES, GL_STATIC_DRAW);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*verts.size(), &verts[0], GL_STATIC_DRAW);
ExitOnGLError("ERROR: Could not bind the VBO to the VAO");
cout << sizeof(verts[0].Position) << endl;
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)0);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)sizeof(verts[0].Position));
ExitOnGLError("ERROR: Could not set VAO attributes");
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, BufferIds[2]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*ind.size(), &ind[0], GL_STATIC_DRAW);
ExitOnGLError("ERROR: Could not bind the IBO to the VAO");
glBindVertexArray(0);
}
DrawCube function:
void DrawCube(void)
{
float CubeAngle;
clock_t Now = clock();
if (LastTime == 0)
LastTime = Now;
CubeRotation += 45.0f * ((float)(Now - LastTime) / CLOCKS_PER_SEC);
CubeAngle = DegreesToRadians(CubeRotation);
LastTime = Now;
ModelMatrix = IDENTITY_MATRIX;
RotateAboutY(&ModelMatrix, CubeAngle);
RotateAboutX(&ModelMatrix, CubeAngle);
glUseProgram(ShaderIds[0]);
ExitOnGLError("ERROR: Could not use the shader program");
glUniformMatrix4fv(ModelMatrixUniformLocation, 1, GL_FALSE, ModelMatrix.m);
glUniformMatrix4fv(ViewMatrixUniformLocation, 1, GL_FALSE, ViewMatrix.m);
ExitOnGLError("ERROR: Could not set the shader uniforms");
glBindVertexArray(BufferIds[0]);
ExitOnGLError("ERROR: Could not bind the VAO for drawing purposes");
glDrawElements(GL_LINE_STRIP, 29000, GL_UNSIGNED_INT, (GLvoid*)0);
//glDrawElements(GL_LINE_STRIP, 29000, GL_UNSIGNED_INT, &verts[0]);
ExitOnGLError("ERROR: Could not draw the cube");
glBindVertexArray(0);
glUseProgram(0);
}
I figured it out: I was inputting my indices wrong. I was making the elements 1, 2, 2, 3, 3, 4, 4, ... (in the indices vector), but they really should just be consecutive counts: 1, 2, 3, 4, 5, 6, ..., vector.size() - 1, vector.size(). I did not know how indices worked; I thought you had to connect 1 to 2, 2 to 3, 3 to 4, and so on, which is why I was putting in two of each number. However, it seems that it just goes from 1 to 2 to 3 to 4.
So change:
ind.push_back(0);
for (int i = 1; i < verts.size()-1; i++)
{
if (i % 2 == 0)
ind.push_back( (GLuint)i / 2);
else
ind.push_back( (GLuint)(i / 2) + 1);
}
to
ind.push_back(0);
for (int i = 1; i < verts.size(); i++)
{
ind.push_back(i);
}
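For contrast, the doubled-up pattern would be what GL_LINES expects, since there every pair of indices forms an independent segment, while GL_LINE_STRIP connects consecutive indices automatically. A small illustrative sketch (not from the original code):
GLuint stripIndices[] = { 0, 1, 2, 3 }; // GL_LINE_STRIP draws segments 0-1, 1-2, 2-3
GLuint linesIndices[] = { 0, 1, 1, 2, 2, 3 }; // GL_LINES needs the doubled pattern for the same path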

Why does this vertex buffer object fail to update?

I have the following pieces of code where I successfully create a vertex buffer object, initialize it with data, and render it using GLSL 4.0. However, when I go to update the data stored in the vertices after animation, OpenGL gives me the error code 0x502 and does not accept my updated vertex information.
Could someone point me in the right direction as to why this code does not allow my vertex information to be successfully updated? I should also mention that sometimes the data is successfully updated, but it is not always consistent/predictable.
Data Structure used
struct Vertex3{
glm::vec3 vtx; //0
glm::vec3 norm; //3
glm::vec3 tex; //6 Use for texturing or color
};
vector<Vertex3> geometry.vertices3;
Initialization Code
void solidus::Mesh::initVBO(){
geometry.totalVertexCount = geometry.getVertexCount();
// Allocate an OpenGL vertex array object.
glGenVertexArrays(1, &vertexArrayId);
glGenBuffers(2,geometry.vboObjects);
// Bind the vertex array object to store all the buffers and vertex attributes we create here.
glBindVertexArray(vertexArrayId);
glBindBuffer(GL_ARRAY_BUFFER, geometry.vboObjects[VERTEX_DATA]);
//size the size of the total vtx
GLuint byte_size = getTotalSize();
//Reserve the inital space for the vertex data
glBufferData(GL_ARRAY_BUFFER, byte_size, NULL, GL_STREAM_DRAW);
if(geometry.isStructVertex4())
initVBO4( );
else if(geometry.isStructVertex3())
initVBO3( );
else
initVBO2( );
//release
glBindVertexArray(0);
geometry.vertices4.clear();
//geometry.vertices3.clear();
geometry.vertices2.clear();
}
void solidus::Mesh::initVBO3( ){
//getTotalSize() == getVtxCount() * sizeof(Vertex3);
glBufferSubData(GL_ARRAY_BUFFER, 0, getTotalSize(), &geometry.vertices3[0]);
//Note: offsetof -- c++ standard library
//Note: glVertexAttribPointer- first parameter is location of GLSL variable
glEnableVertexAttribArray(0); // Vertex4 position
glVertexAttribPointer( (GLuint)0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex3), (GLvoid*)offsetof(Vertex3,vtx) );
// Vertex4 normal
glEnableVertexAttribArray(1);
glVertexAttribPointer( (GLuint)1, 3, GL_FLOAT, GL_TRUE, sizeof(Vertex3), (GLvoid*)offsetof(Vertex3,norm) );
// Texture coords
glEnableVertexAttribArray(2);
glVertexAttribPointer( (GLuint)2, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex3),(GLvoid*)offsetof(Vertex3,tex) );
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, geometry.vboObjects[INDEX_DATA]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(GLuint)*geometry.indices.size(), &geometry.indices[0], GL_STATIC_DRAW);
}
Update the Mesh Vertex information (why does this fail?)
void solidus::Mesh::uploadVertexGLFx(){
glBindBuffer(GL_ARRAY_BUFFER, geometry.vboObjects[VERTEX_DATA]);
string e0="";
if(geometry.isStructVertex2()){
solidus::GLVBO::setVBOSubData(getTotalSize (), &geometry.vertices2[0]);
e0="Vertex2";
}else if(geometry.isStructVertex3()){
//THIS IS THE POINT OF INTEREST: at least suspected!!!!!
// getVtxCount() * sizeof(Vertex3) = getTotalSize
glBufferSubData(GL_ARRAY_BUFFER, 0, getTotalSize (), &geometry.vertices3[0]);
e0="Vertex3";
}else {
solidus::GLVBO::setVBOSubData(getTotalSize (), &geometry.vertices4[0]);
e0="Vertex4";
}
//report error if glGetError is not equal to 0
postMsg("failed to upload vertex for struct " + e0 , "uploadVertexGLFx",30);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
I modified my uploadVertexGLFx function to the code listed below. The main difference with this code is that after I re-supplied the vertex information to GL, I informed OpenGL of the pointer offsets using gl*AttribPointer. Now the program reliably updates when I call my update function.
void solidus::Mesh::uploadVertexGLFx(){
glBindBuffer(GL_ARRAY_BUFFER, geometry.vboObjects[VERTEX_DATA]);
string e0="";
if(geometry.isStructVertex2()){
solidus::GLVBO::setVBOSubData(getTotalSize (), &geometry.vertices2[0]);
e0="Vertex2";
}else if(geometry.isStructVertex3()){
//glBufferData(GL_ARRAY_BUFFER, getTotalSize (), NULL, GL_STREAM_DRAW);
//THIS IS THE POINT OF INTEREST: at least suspected!!!!!
// getVtxCount() * sizeof(Vertex3) = getTotalSize
cout << "Total Size = " << getTotalSize() <<endl;
cout << "Vtx Count = " << getVtxCount() << endl;
cout << "Sizeof(Vertex3)=" <<sizeof(Vertex3)<<endl;
Vertex3 *f = new Vertex3[getVtxCount()];
for(int i=0; i<getVtxCount();i++){
f[i] = geometry.vertices3[i];
}
glBufferData(GL_ARRAY_BUFFER, getTotalSize(), NULL, GL_STREAM_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, getTotalSize (), f);
//Note: glVertexAttribPointer- first parameter is location of GLSL variable
glEnableVertexAttribArray(0); // Vertex4 position
glVertexAttribPointer( (GLuint)0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex3), (GLvoid*)offsetof(Vertex3,vtx) );
// Vertex4 normal
glEnableVertexAttribArray(1);
glVertexAttribPointer( (GLuint)1, 3, GL_FLOAT, GL_TRUE, sizeof(Vertex3), (GLvoid*)offsetof(Vertex3,norm) );
// Texture coords
glEnableVertexAttribArray(2);
glVertexAttribPointer( (GLuint)2, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex3),(GLvoid*)offsetof(Vertex3,tex) );
delete [] f; // matches the new[] allocation above
f = nullptr;
e0="Vertex3";
}else {
solidus::GLVBO::setVBOSubData(getTotalSize (), &geometry.vertices4[0]);
e0="Vertex4";
}
//report error if glGetError is not equal to 0
postMsg("failed to upload vertex for struct " + e0 , "uploadVertexGLFx",30);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
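Incidentally, the working version re-specifies the buffer's data store with glBufferData(..., NULL, ...) before uploading, which is the usual "orphaning" pattern for buffers that are rewritten every frame. A minimal sketch of just that pattern, where byteSize and newData stand in for the real size and source pointer:
glBindBuffer(GL_ARRAY_BUFFER, geometry.vboObjects[VERTEX_DATA]);
glBufferData(GL_ARRAY_BUFFER, byteSize, NULL, GL_STREAM_DRAW); // orphan the old storage
glBufferSubData(GL_ARRAY_BUFFER, 0, byteSize, newData);        // fill the fresh storage
glBindBuffer(GL_ARRAY_BUFFER, 0);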