I'm having some trouble with my shader/VBO/VAO setup in OpenGL.
Situation
I have created a geometry shader that lets me draw a cube at a given position (the position is set through a uniform).
I'm trying to use a chunk system to reduce my draw calls, but I can't figure out how to start. How do I reduce draw calls while still rendering just as many cubes?
Implementation ideas
I was thinking about reworking my geometry shader with 3 nested for-loops of 16 iterations each to generate the vertex positions of my 4096 cubes. The other possibility I had in mind was to call my shader 4096 times, once per position (I'm a beginner in OpenGL, so I'm not sure that's even possible).
My difficulties
Should I send those positions (or indices in my chunk that I'll convert to positions) to my shader with a uniform or a VBO?
In the case of a VBO, is it possible to send an array of uint8_t? Each element of the array would hold one byte describing the block type: 0 for air, 1 for dirt. Note that the array indices are used to determine the block position. (Example: the block at chunk_data[0][5][2] is at position vec3(0.f, 5.f, 2.f) + chunk_position and its type is dirt.)
Again, if I go for a VBO, is it possible to change layout (location = 0) in vec3 position; to an array of bytes (int, I guess, since GLSL doesn't provide bytes/uint8_t)?
Edit
Thanks to @Acorn, I found that instanced drawing could help me improve performance. I'm now trying to understand how instanced drawing really works and how to apply it to my problem. For now I think I need to define what my instanced array will contain (I guess it will be block positions and types).
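Here is a rough sketch of what I imagine the instanced setup could look like. It is only an illustration of the idea, not working code: BlockInstance, m_instanceVbo, m_instanceCount, SetupInstanced and DrawInstanced are hypothetical names, it assumes the geometry shader above keeps expanding one point into a cube, and it assumes the per-instance data is read in GLSL as an integer attribute (e.g. layout (location = 0) in uvec4 blockData;), since GLSL has no byte type.
// Hypothetical sketch, not working code: per-instance block data for one chunk.
struct BlockInstance
{
    uint8_t x, y, z; // block position inside the chunk
    uint8_t type;    // 0 = air, 1 = dirt, ...
};
void Chunk::SetupInstanced()
{
    std::vector<BlockInstance> instances;
    for (uint8_t x = 0; x < 16; ++x)
        for (uint8_t y = 0; y < 16; ++y)
            for (uint8_t z = 0; z < 16; ++z)
                if (blocks[x][y][z] != 0) // skip air blocks entirely
                    instances.push_back({ x, y, z, blocks[x][y][z] });
    m_instanceCount = static_cast<GLsizei>(instances.size());
    glGenVertexArrays(1, &m_vao);
    glGenBuffers(1, &m_instanceVbo);
    glBindVertexArray(m_vao);
    glBindBuffer(GL_ARRAY_BUFFER, m_instanceVbo);
    glBufferData(GL_ARRAY_BUFFER, instances.size() * sizeof(BlockInstance), instances.data(), GL_STATIC_DRAW);
    // Integer attribute: 4 unsigned bytes per instance (x, y, z, type).
    glEnableVertexAttribArray(0);
    glVertexAttribIPointer(0, 4, GL_UNSIGNED_BYTE, sizeof(BlockInstance), (void*)0);
    glVertexAttribDivisor(0, 1); // advance once per instance, not per vertex
    glBindVertexArray(0);
}
void Chunk::DrawInstanced()
{
    glBindVertexArray(m_vao);
    // One point per instance; the geometry shader expands each point into a cube.
    glDrawArraysInstanced(GL_POINTS, 0, 1, m_instanceCount);
    glBindVertexArray(0);
}
With something like this the chunk position can stay a uniform, while each cube's local offset and type come from the instanced attribute, and the whole chunk is drawn with a single call.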
Resources
RenderingManager.cpp (Method that handles voxel drawing)
void ElkRendering::Managers::RenderingManager::DrawVoxel(const glm::vec3 & p_position)
{
if (m_currentTexture && m_currentShader)
{
m_currentShader->SetUniformVec3("u_position", p_position);
m_voxel.Draw(*m_currentShader);
}
}
Voxel.cpp (Setup and draw methods)
void Voxel::Setup()
{
GLfloat verts[] =
{
0.0f, 0.0f, 0.0f
};
glGenVertexArrays(1, &m_vao);
glGenBuffers(1, &m_vbo);
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
void Voxel::Draw(Shader& p_shader)
{
PROFILER_SPY("Voxel::Draw");
glBindVertexArray(m_vao);
glDrawArrays(GL_POINTS, 0, 1);
glBindVertexArray(0);
}
Chunk.cpp (Just an idea; this code doesn't work at all and isn't complete)
void Chunk::Setup()
{
for (uint8_t x = 0; x < 16; ++x)
{
for (uint8_t y = 0; y < 16; ++y)
{
for (uint8_t z = 0; z < 16; ++z)
{
blocks[x][y][z] = 1;
}
}
}
glGenVertexArrays(1, &m_vao);
glGenBuffers(1, &m_vbo);
glBindVertexArray(m_vao);
glBindBuffer(GL_ARRAY_BUFFER, m_vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(uint8_t) * 16 * 16 * 16, blocks, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindVertexArray(0);
}
void Chunk::Draw(Shader& p_shader)
{
glBindVertexArray(m_vao);
glDrawArrays(GL_POINTS, 0, 1);
glBindVertexArray(0);
}
voxel.glsl
#shader vertex
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 texCoord;
out vec3 v_pos;
void main()
{
v_pos = position;
}
#shader geometry
#version 330 core
layout (points) in;
layout (triangle_strip, max_vertices = 24) out;
uniform vec3 u_position;
uniform mat4 u_vp;
const float textCount = 3.0f;
const vec3 cubeVertex[8] = vec3[8]
(
/* Z+ (Front) */
vec3(-0.5f, -0.5f, +0.5f), // 0
vec3(-0.5f, +0.5f, +0.5f), // 1
vec3(+0.5f, -0.5f, +0.5f), // 2
vec3(+0.5f, +0.5f, +0.5f), // 3
/* Z- (Back) */
vec3(-0.5f, -0.5f, -0.5f), // 4
vec3(-0.5f, +0.5f, -0.5f), // 5
vec3(+0.5f, -0.5f, -0.5f), // 6
vec3(+0.5f, +0.5f, -0.5f) // 7
);
const int cubeIndices[24] = int[24]
(
1,0,3,2, // Z+ (Front)
3,2,7,6, // X+ (Right)
7,6,5,4, // Z- (Back)
5,4,1,0, // X- (Left)
0,4,2,6, // Y- (Bottom)
5,1,7,3 // Y+ (Top)
);
const vec2 textCoords[24] = vec2[24]
(
vec2(1/textCount, 1), vec2(1/textCount, 0), vec2(2/textCount, 1), vec2(2/textCount, 0), // Front
vec2(1/textCount, 1), vec2(1/textCount, 0), vec2(2/textCount, 1), vec2(2/textCount, 0), // Right
vec2(1/textCount, 1), vec2(1/textCount, 0), vec2(2/textCount, 1), vec2(2/textCount, 0), // Back
vec2(1/textCount, 1), vec2(1/textCount, 0), vec2(2/textCount, 1), vec2(2/textCount, 0), // Left
vec2(2/textCount, 1), vec2(2/textCount, 0), vec2(1, 1), vec2(1, 0), // Bottom
vec2(0, 1), vec2(0, 0), vec2(1/textCount, 1), vec2(1/textCount, 0) // Top
);
out vec2 textureCoord;
out float lighting;
void main()
{
mat4 model = mat4(1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, u_position.x, u_position.y, u_position.z, 1.0);
mat4 mvp = u_vp * model;
for (int i = 0; i < 24; ++i)
{
gl_Position = mvp * (gl_in[0].gl_Position + vec4(cubeVertex[cubeIndices[i]], 0));
textureCoord = textCoords[i];
if (i >= 16 && i < 20)
lighting = 0.2f;
else if (i >= 20)
lighting = 1.0f;
else
lighting = 0.6f;
EmitVertex();
if ((i + 1) % 4 == 0) EndPrimitive();
}
}
#shader fragment
#version 330 core
uniform sampler2D tex;
out vec4 FragColor;
in vec2 textureCoord;
in float lighting;
void main()
{
// Final color
FragColor = texture2D(tex, textureCoord) * lighting;
}
Related
I'm trying to show the normals of some vertices of the mesh when I select them, but depending on the position of the camera these normals are either displayed or not, and the big problem is that they are displayed when they shouldn't be (i.e. when I select vertices of the tibia but I'm looking at the calf, so they should be hidden by the leg) and not displayed when they should be.
I could be wrong, but it looks as if some geometry of the model is drawn on top of the line. Moreover, if I disable the depth test before issuing the draw calls it works (even though the problem of them showing when they shouldn't obviously isn't resolved). Here are some images to give you an idea of what I mean.
WITH DEPTH TEST OFF:
WITH DEPTH TEST ON:
Here's the code:
void StatusManager::Render() {
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
if (!animatedModel)
return;
Update();
// draw wireframe if enabled
if (wireframeEnabled)
DrawWireframe();
if (visualMode == Mode_Texture) {
// draw model
if (lightingMode == Mode_Flat)
DrawModel(modelFlatShader);
else if (lightingMode == Mode_Smooth)
DrawModel(modelSmoothShader);
else
DrawModel(modelNoLightShader);
}
else if (visualMode == Mode_CurrentBoneIDInfluence) {
DrawModel(currentBoneShader);
}
else if (visualMode == Mode_NumBones)
{
DrawModel(numBonesShader);
}
else
{
DrawModel(modelGreyShader);
}
// render selected vertices
DrawSelectedVertices();
if (!info.hitPoint)
return;
//DrawHotPoint();
if (selectionMode == Mode_Vertex)
DrawHoveredPoint();
if (selectionMode == Mode_Edge)
DrawHoveredLine();
if (selectionMode == Mode_Face)
DrawHoveredFace();
}
void StatusManager::DrawWireframe() {
wireframeShader.use();
glLineWidth(1.0f);
// model/view/projection transformations
glm::mat4 modelView = camera.viewMatrix;
wireframeShader.setMat4("modelView", modelView);
wireframeShader.setMat4("projection", projection);
// pass bones matrices to the shader
auto transforms = animator.GetFinalBoneMatrices();
for (int i = 0; i < transforms.size(); ++i)
wireframeShader.setMat4("finalBonesMatrices[" + std::to_string(i) + "]", transforms[i]);
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
animatedModel.value().Draw(wireframeShader);
}
void StatusManager::DrawModel(Shader& modelShader) {
modelShader.use();
// model/view/projection transformations
modelShader.setMat4("modelView", camera.viewMatrix);
modelShader.setMat4("projection", projection);
if (visualMode == Mode_Texture)
modelShader.setVec3("light_pos", lightPos);
else if (visualMode == Mode_CurrentBoneIDInfluence)
modelShader.setInt("currentBoneID", currentBoneID);
// pass bones matrices to the shader
auto transforms = animator.GetFinalBoneMatrices();
for (int i = 0; i < transforms.size(); ++i)
modelShader.setMat4("finalBonesMatrices[" + std::to_string(i) + "]", transforms[i]);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1.0, 1.0);
/*if (pause)
bakedModel.value().Draw(modelShader);
else*/
animatedModel.value().Draw(modelShader);
}
void StatusManager::DrawHoveredFace() {
assert(bakedModel.has_value());
assert(info.hitPoint.has_value());
Mesh& m = bakedModel.value().meshes[info.meshIndex];
Face& f = info.face.value();
//vertex
float hoveredVertices[9] = {
m.vertices[f.indices[0]].Position.x, m.vertices[f.indices[0]].Position.y, m.vertices[f.indices[0]].Position.z,
m.vertices[f.indices[1]].Position.x, m.vertices[f.indices[1]].Position.y, m.vertices[f.indices[1]].Position.z,
m.vertices[f.indices[2]].Position.x, m.vertices[f.indices[2]].Position.y, m.vertices[f.indices[2]].Position.z,
};
hoverShader.use();
glBindVertexArray(HVAO);
glBindBuffer(GL_ARRAY_BUFFER, HVBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(hoveredVertices), &hoveredVertices);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(0.0, 0.0);
// model/view/projection transformations
glm::mat4 modelView = camera.viewMatrix;
hoverShader.setMat4("modelView", modelView);
hoverShader.setMat4("projection", projection);
glDrawArrays(GL_TRIANGLES, 0, 3);
//unbind
glBindVertexArray(0);
}
void StatusManager::DrawHoveredPoint() {
assert(bakedModel.has_value());
assert(info.hitPoint.has_value());
Mesh& m = bakedModel.value().meshes[info.meshIndex];
Face& f = info.face.value();
int index = getClosestVertexIndex(info.hitPoint.value(), m, f);
//vertex
float hoveredVertices[3] = { m.vertices[index].Position.x, m.vertices[index].Position.y, m.vertices[index].Position.z };
hoverShader.use();
glBindVertexArray(HVAO);
glBindBuffer(GL_ARRAY_BUFFER, HVBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, 3 * sizeof(float), &hoveredVertices);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(0.0, 0.0);
// model/view/projection transformations
glm::mat4 modelView = camera.viewMatrix;
hoverShader.setMat4("modelView", modelView);
hoverShader.setMat4("projection", projection);
glPointSize(8.0f);
glDrawArrays(GL_POINTS, 0, 1);
//unbind
glBindVertexArray(0);
}
void StatusManager::DrawHotPoint()
{
float hotVertices[3] = { hotPoint.x, hotPoint.y, hotPoint.z };
hoverShader.use();
glBindVertexArray(HVAO);
glBindBuffer(GL_ARRAY_BUFFER, HVBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, 3 * sizeof(float), &hotVertices);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(0.0, 0.0);
// model/view/projection transformations
glm::mat4 modelView = camera.viewMatrix;
hoverShader.setMat4("modelView", modelView);
hoverShader.setMat4("projection", projection);
glPointSize(8.0f);
glDrawArrays(GL_POINTS, 0, 1);
//unbind
glBindVertexArray(0);
}
void StatusManager::UpdateSelectedVertices()
{
selectedVertices.clear();
for (Vertex* v : selectedVerticesPointers)
selectedVertices.push_back(*v);
}
void StatusManager::DrawHoveredLine() {
assert(bakedModel.has_value());
assert(info.hitPoint.has_value());
Mesh& m = bakedModel.value().meshes[info.meshIndex];
Face& f = info.face.value();
auto line = getClosestLineIndex(info.hitPoint.value(), m, f);
//vertex
float hoveredVertices[6] = {
m.vertices[line.v1].Position.x, m.vertices[line.v1].Position.y, m.vertices[line.v1].Position.z,
m.vertices[line.v2].Position.x, m.vertices[line.v2].Position.y, m.vertices[line.v2].Position.z
};
hoverShader.use();
glBindVertexArray(HVAO);
glBindBuffer(GL_ARRAY_BUFFER, HVBO);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(hoveredVertices), &hoveredVertices);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(0.0, 0.0);
// model/view/projection transformations
glm::mat4 modelView = camera.viewMatrix;
hoverShader.setMat4("modelView", modelView);
hoverShader.setMat4("projection", projection);
glLineWidth(3.0f);
glDrawArrays(GL_LINES, 0, 2);
glLineWidth(1.0f);
//unbind
glBindVertexArray(0);
}
The normal shader, just for completeness:
VERTEX SHADER:
#version 330 core
layout(location = 0) in vec3 pos;
layout(location = 1) in vec3 norm;
layout(location = 2) in ivec4 boneIds;
layout(location = 3) in vec4 weights;
layout(location = 4) in int numBones;
uniform mat4 projection;
uniform mat4 modelView;
uniform mat4 finalBonesMatrices[100];
out VS_OUT {
vec4 normal;
} vs_out;
void main()
{
mat4 cumulativeMatrix = mat4(1.0);
if (numBones>0)
cumulativeMatrix = mat4(0.0);
for(int i = 0 ; i < numBones ; i++)
{
cumulativeMatrix += (finalBonesMatrices[boneIds[i]] * weights[i]);
}
gl_Position = projection * modelView * cumulativeMatrix * vec4(pos, 1.0);
vs_out.normal = normalize(modelView * cumulativeMatrix * vec4(norm, 0.0));
}
GEOMETRY SHADER:
#version 330 core
layout (points) in;
layout (line_strip, max_vertices = 2) out;
uniform float normal_length;
in VS_OUT {
vec4 normal;
} gs_in[];
void main() {
gl_Position = gl_in[0].gl_Position;
EmitVertex();
gl_Position = gl_in[0].gl_Position + gs_in[0].normal * normal_length;
EmitVertex();
EndPrimitive();
}
FRAGMENT SHADER:
#version 330 core
out vec4 FragColor;
void main()
{
FragColor = vec4(1, 0, 0, 1);
}
If you need any other piece of code, tell me and I'll provide it, but I think I've posted all the code and images you need.
OK, I don't know how I didn't notice it, but yes, the problem was that I wasn't applying the projection matrix to the normal vector. Thanks to Spektre for pointing this out.
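For future readers, here is a minimal sketch of what that fix could look like in the vertex shader posted above (same inputs and uniforms; only the last line changes, so that the geometry shader adds two quantities that live in the same clip space as gl_Position):
void main()
{
    mat4 cumulativeMatrix = mat4(1.0);
    if (numBones > 0)
        cumulativeMatrix = mat4(0.0);
    for (int i = 0; i < numBones; i++)
        cumulativeMatrix += (finalBonesMatrices[boneIds[i]] * weights[i]);
    gl_Position = projection * modelView * cumulativeMatrix * vec4(pos, 1.0);
    // Project the normal as well; otherwise the geometry shader adds a view-space
    // vector to a clip-space position and the depth test misbehaves.
    vs_out.normal = projection * normalize(modelView * cumulativeMatrix * vec4(norm, 0.0));
}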
I am using Assimp to load models to render in OpenGL but am running into an issue where chunks/pieces of a mesh don't render.
Example:
What model is supposed to look like:
What I end up rendering:
As you can see, some of the model is rendering properly, but not all.
I have verified multiple times that the meshes being loaded from assimp are loading the correct vertices and indices into my "Mesh" class. Here is my code for loading a model:
This function will recursively call itself for all child nodes and load each mesh inside the node. Each mesh will then be transformed into my own "Mesh" class by creating a vector of vertices and faces.
void Model::LoadAssimpNode(aiNode* node, const aiScene* scene)
{
// Process assimp meshes
for (unsigned int i = 0; i < node->mNumMeshes; i++)
{
aiMesh* mesh = scene->mMeshes[node->mMeshes[i]];
this->meshes.push_back(this->LoadAssimpMesh(mesh, scene));
}
// Recursively process child nodes
for (unsigned int i = 0; i < node->mNumChildren; i++)
{
this->LoadAssimpNode(node->mChildren[i], scene);
}
}
Mesh Model::LoadAssimpMesh(aiMesh* mesh, const aiScene* scene)
{
std::vector<sVertex> vertices;
for (unsigned int i = 0; i < mesh->mNumVertices; i++)
{
sVertex vertex;
vertex.x = mesh->mVertices[i].x;
vertex.y = mesh->mVertices[i].y;
vertex.z = mesh->mVertices[i].z;
vertex.nx = mesh->mNormals[i].x;
vertex.ny = mesh->mNormals[i].y;
vertex.nz = mesh->mNormals[i].z;
if (mesh->mTextureCoords[0])
{
vertex.u0 = mesh->mTextureCoords[0][i].x;
vertex.v0 = mesh->mTextureCoords[0][i].y;
}
vertices.push_back(vertex);
}
std::vector<sTriangle> faces;
for (unsigned int i = 0; i < mesh->mNumFaces; i++)
{
sTriangle face;
aiFace assimpFace = mesh->mFaces[i];
if (assimpFace.mNumIndices != 3)
{
std::cout << "Face is not a triangle!" << std::endl;
}
for (unsigned int j = 0; j < assimpFace.mNumIndices; j++)
{
face.vertIndex[j] = assimpFace.mIndices[j];
}
faces.push_back(face);
}
std::vector<Texture> textures;
if (mesh->mMaterialIndex >= 0)
{
aiMaterial* material = scene->mMaterials[mesh->mMaterialIndex];
// Sampler names should adhere to the following convention:
// Diffuse: texture_diffuseN
// Specular: texture_specularN
// Normal: texture_normalN
// Where N = texture numbers
for (Texture texture : this->LoadAssimpMaterialTextures(material, aiTextureType_DIFFUSE, "texture_diffuse"))
{
this->loadedTextures.insert(std::make_pair(texture.path.C_Str(), texture));
textures.push_back(texture);
}
for (Texture texture : this->LoadAssimpMaterialTextures(material, aiTextureType_SPECULAR, "texture_specular"))
{
this->loadedTextures.insert(std::make_pair(texture.path.C_Str(), texture));
textures.push_back(texture);
}
}
return Mesh(vertices, faces, textures);
}
The sVertex and sTriangle structs are defined as:
struct sVertex
{
float x, y, z;
float nx, ny, nz;
float u0, v0;
};
struct sTriangle
{
unsigned int vertIndex[3];
};
Now that the model is loaded from Assimp, we call the SetupMesh() function, which sets up each mesh's respective VAO, VBO and EBO:
void Mesh::SetupMesh()
{
// Generate IDs for our VAO, VBO and EBO
glGenVertexArrays(1, &this->VAO);
glGenBuffers(1, &this->VBO);
glGenBuffers(1, &this->EBO);
glBindVertexArray(this->VAO);
// Now ANY state that is related to vertex or index buffer
// and vertex attribute layout, is stored in the 'state'
// of the VAO...
// Tell OpenGL where to look for vertex data
glBindBuffer(GL_ARRAY_BUFFER, this->VBO);
glBufferData(GL_ARRAY_BUFFER, this->vertices.size() * sizeof(sVertex), &this->vertices[0], GL_STATIC_DRAW);
// Tell OpenGL where our index buffer begins (AKA: where to look for faces)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, this->faces.size() * sizeof(sTriangle), &this->faces[0], GL_STATIC_DRAW);
// Set the vertex attributes for this shader
// Layout information can be found in the vertex shader, currently:
// 0 = position
// 1 = normals
// 2 = texture coordinates
glEnableVertexAttribArray(0); // position
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(sVertex), (void*) offsetof(sVertex, x));
glEnableVertexAttribArray(1); // normal
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(sVertex), (void*) offsetof(sVertex, nx));
glEnableVertexAttribArray(2); // textureCoordinates
glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, sizeof(sVertex), (void*)offsetof(sVertex, u0));
// Now that all the parts are set up, unbind buffers
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
}
Once this is all set up, I call the Draw method for each mesh in my render loop:
void Mesh::Draw(const CompiledShader& shader)
{
glm::mat4 matModel = glm::mat4(1.0f);
glm::mat4 matTranslate = glm::translate(glm::mat4(1.0f), this->positionXYZ); // Translation matrix
glm::mat4 rotateX = glm::rotate(glm::mat4(1.0f), this->orientationXYZ.x, glm::vec3(1.0f, 0.0f, 0.0f)); // X axis rotation
glm::mat4 rotateY = glm::rotate(glm::mat4(1.0f), this->orientationXYZ.y, glm::vec3(0.0f, 1.0f, 0.0f)); // Y axis rotation
glm::mat4 rotateZ = glm::rotate(glm::mat4(1.0f), this->orientationXYZ.z, glm::vec3(0.0f, 0.0f, 1.0f)); // Z axis rotation
glm::mat4 matScale = glm::scale(glm::mat4(1.0f), glm::vec3(this->scale, this->scale, this->scale)); // Scale the mesh
glm::mat4 matInvTransposeModel = glm::inverse(glm::transpose(matModel));
// Apply all the transformations to our matrix
matModel = matModel * matTranslate;
matModel = matModel * rotateZ;
matModel = matModel * rotateY;
matModel = matModel * rotateX;
matModel = matModel * matScale;
glUseProgram(shader.ID);
glUniformMatrix4fv(glGetUniformLocation(shader.ID, "matModel"), 1, GL_FALSE, glm::value_ptr(matModel)); // Tell shader the model matrix (AKA: Position orientation and scale)
glUniformMatrix4fv(glGetUniformLocation(shader.ID, "matModelInverseTranspose"), 1, GL_FALSE, glm::value_ptr(matInvTransposeModel));
// Draw the mesh
glBindVertexArray(this->VAO);
glDrawElements(GL_TRIANGLES, this->faces.size(), GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
}
My shaders are very simple; the color of a pixel is just the vertex's normal:
Vertex Shader:
#version 420
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 normal;
layout (location = 2) in vec2 textureCoordinates;
uniform mat4 matModel;
uniform mat4 matView;
uniform mat4 matProjection;
uniform mat4 matModelInverseTranspose; // For normal calculation
out vec4 fVertWorldLocation;
out vec4 fNormal;
out vec2 TextureCoordinates;
void main()
{
mat4 MVP = matProjection * matView * matModel;
gl_Position = MVP * vec4(position, 1.0f);
TextureCoordinates = textureCoordinates;
// The location of the vertex in "world" space (not screen space)
fVertWorldLocation = matModel * vec4(position, 1.0f);
// Calculate the normal based on any rotation we've applied.
// This inverse transpose removes scaling and translation (movement)
// from the matrix.
fNormal = matModelInverseTranspose * vec4(normal, 1.0f);
};
Fragment Shader:
#version 420
in vec2 TextureCoordinates;
in vec4 fNormal;
out vec4 Color;
uniform sampler2D texture_diffuse;
void main()
{
//Color = vec4(texture(texture_diffuse, TextureCoordinates));
//Color = vec4(TextureCoordinates, 1.0f, 1.0f);
Color = fNormal;
}
Sorry for the insane length of this post, but I feel that all of it was necessary to get my point across.
If anyone could point out what I am doing wrong here it would be greatly appreciated! I feel like I need an extra pair of eyes here because I have read my code over countless times and can't seem to come up with anything.
I made a stupid mistake: I was under the impression that the "count" argument of glDrawElements() wanted the number of faces, not the number of indices.
The problem was fixed by changing my glDrawElements call from:
glDrawElements(GL_TRIANGLES, this->faces.size(), GL_UNSIGNED_INT, 0);
To this:
glDrawElements(GL_TRIANGLES, this->faces.size() * 3, GL_UNSIGNED_INT, 0);
I simply want to draw a line to the screen. I'm using OpenGL 4.6. All the tutorials I found used glVertexPointer, which is deprecated as far as I can tell.
I know how you can draw triangles using buffers, so I tried that with a line. It didn't work, merely displaying a black screen. (I'm using GLFW and GLEW, and I am using a vertex+fragment shader I already tested on the triangle)
// Make line
float line[] = {
0.0, 0.0,
1.0, 1.0
};
unsigned int buffer; // The ID, kind of a pointer for VRAM
glGenBuffers(1, &buffer); // Allocate memory for the triangle
glBindBuffer(GL_ARRAY_BUFFER, buffer); // Set the buffer as the active array
glBufferData(GL_ARRAY_BUFFER, 2 * sizeof(float), line, GL_STATIC_DRAW); // Fill the buffer with data
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), 0); // Specify how the buffer is converted to vertices
glEnableVertexAttribArray(0); // Enable the vertex array
// Loop until the user closes the window
while (!glfwWindowShouldClose(window))
{
// Clear previous
glClear(GL_COLOR_BUFFER_BIT);
// Draw the line
glDrawArrays(GL_LINES, 0, 2);
// Swap front and back buffers
glfwSwapBuffers(window);
// Poll for and process events
glfwPollEvents();
}
Am I going in the right direction, or is a completely different approach the current best practice?
If I am, how do I fix my code?
The issue is the call to glBufferData. The 2nd argument is the size of the buffer in bytes. Since the vertex array consists of 2 coordinates with 2 components each, the size of the buffer is 4 * sizeof(float) rather than 2 * sizeof(float):
glBufferData(GL_ARRAY_BUFFER, 2 * sizeof(float), line, GL_STATIC_DRAW); // wrong
glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(float), line, GL_STATIC_DRAW); // correct
But note that this is still not "modern" OpenGL. If you want to use a core profile OpenGL context, then you have to use a shader program and a Vertex Array Object.
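A minimal sketch of that, reusing the line array and the already linked shader program from the question (here assumed to be in a variable called program); the key change is that a VAO is created and bound before the attribute setup:
// Setup (once):
GLuint vao = 0, buffer = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao); // the VAO records the attribute setup below
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, 4 * sizeof(float), line, GL_STATIC_DRAW); // 2 vertices * 2 floats
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void*)0);
// In the render loop:
glUseProgram(program);
glBindVertexArray(vao);
glDrawArrays(GL_LINES, 0, 2);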
However, if you are using a core OpenGL context and the forward compatibility bit is set, the width of a line (glLineWidth) cannot be greater than 1.0.
See OpenGL 4.6 API Core Profile Specification - E.2 Deprecated and Removed Features
Wide lines - LineWidth values greater than 1.0 will generate an INVALID_VALUE error.
You have to find a different approach.
I recommend using a shader that generates triangle primitives along a line strip (or even a line loop).
The task is to generate a thick line strip with as little CPU and GPU overhead as possible. That means avoiding computation of polygons on the CPU as well as geometry shaders (or tessellation shaders).
Each segment of the line consists of a quad represented by 2 triangle primitives, i.e. 6 vertices.
0 2 5
+-------+ +
| / / |
| / / |
| / / |
+ +-------+
1 3 4
Between the line segments, the miter has to be found and the quads have to be cut to the miter.
+----------------+
| / |
| segment 1 / |
| / |
+--------+ |
| segment 2
| |
| |
+-------+
Create an array with the corner points of the line strip. The first and the last point define the start and end tangents of the line strip, so you need to add one point before the line and one point after it. Of course it would be easy to identify the first and last element of the array by comparing the index to 0 and to the length of the array, but we don't want to do any extra checks in the shader.
If a line loop has to be drawn, then the last point has to be added to the head of the array and the first point to its tail.
The array of points is stored in a Shader Storage Buffer Object. We take advantage of the fact that the last variable of an SSBO can be an array of variable size. In older versions of OpenGL (or OpenGL ES) a Uniform Buffer Object or even a Texture can be used.
The shader doesn't need any vertex coordinates or attributes. All we have to know is the index of the line segment. The coordinates are stored in the buffer. To find the index we make use of the index of the vertex currently being processed (gl_VertexID).
To draw a line strip through N points (N-1 segments), 6*(N-1) vertices have to be processed.
We have to create an "empty" Vertex Array Object (without any vertex attribute specification):
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
And to draw 2*(N-1) triangles (6*(N-1) vertices):
glDrawArrays(GL_TRIANGLES, 0, 6*(N-1));
For the coordinate array in the SSBO, the data type vec4 is used (please believe me, you don't want to use vec3):
layout(std430, binding = 0) buffer TVertex
{
vec4 vertex[];
};
Compute the index of the line segment the vertex coordinate belongs to, and the index of the point within the 2 triangles:
int line_i = gl_VertexID / 6;
int tri_i = gl_VertexID % 6;
Since we are drawing N-1 line segments, but the number of elements in the array is N+2, the elements from vertex[line_i] to vertex[line_i+3] can be accessed for each vertex that is processed in the vertex shader.
vertex[line_i+1] and vertex[line_i+2] are the start and end coordinates of the line segment, respectively. vertex[line_i] and vertex[line_i+3] are required to compute the miter.
The thickness of the line should be set in pixel units (uniform float u_thickness). The coordinates have to be transformed from model space to window space. For that, the resolution of the viewport has to be known (uniform vec2 u_resolution). Don't forget the perspective divide. The drawing of the line will even work with perspective projection.
vec4 va[4];
for (int i=0; i<4; ++i)
{
va[i] = u_mvp * vertex[line_i+i];
va[i].xyz /= va[i].w;
va[i].xy = (va[i].xy + 1.0) * 0.5 * u_resolution;
}
The miter calculation even works if the predecessor or successor point is equal to the start or end point of the line segment, respectively. In this case the end of the line is cut normal to its tangent:
vec2 v_line = normalize(va[2].xy - va[1].xy);
vec2 nv_line = vec2(-v_line.y, v_line.x);
vec2 v_pred = normalize(va[1].xy - va[0].xy);
vec2 v_succ = normalize(va[3].xy - va[2].xy);
vec2 v_miter1 = normalize(nv_line + vec2(-v_pred.y, v_pred.x));
vec2 v_miter2 = normalize(nv_line + vec2(-v_succ.y, v_succ.x));
In the final vertex shader we just need to calculate either v_miter1 or v_miter2, depending on tri_i. With the miter, the normal vector of the line segment and the line thickness (u_thickness), the vertex coordinate can be computed:
vec4 pos;
if (tri_i == 0 || tri_i == 1 || tri_i == 3)
{
vec2 v_pred = normalize(va[1].xy - va[0].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_pred.y, v_pred.x));
pos = va[1];
pos.xy += v_miter * u_thickness * (tri_i == 1 ? -0.5 : 0.5) / dot(v_miter, nv_line);
}
else
{
vec2 v_succ = normalize(va[3].xy - va[2].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_succ.y, v_succ.x));
pos = va[2];
pos.xy += v_miter * u_thickness * (tri_i == 5 ? 0.5 : -0.5) / dot(v_miter, nv_line);
}
Finally the window coordinates have to be transformed back to clip space coordinates. Transform from window space to normalized device space. The perspective divide has to be reversed:
pos.xy = pos.xy / u_resolution * 2.0 - 1.0;
pos.xyz *= pos.w;
Polygons created with glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) and glPolygonMode(GL_FRONT_AND_BACK, GL_LINE):
Demo program using the GLFW API for creating a window, GLEW for loading OpenGL, and GLM (OpenGL Mathematics) for the math. (I don't provide the code for the function CreateProgram, which just creates a program object from the vertex shader and fragment shader source code.)
#include <vector>
#include <string>
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <gl/gl_glew.h>
#include <GLFW/glfw3.h>
std::string vertShader = R"(
#version 460
layout(std430, binding = 0) buffer TVertex
{
vec4 vertex[];
};
uniform mat4 u_mvp;
uniform vec2 u_resolution;
uniform float u_thickness;
void main()
{
int line_i = gl_VertexID / 6;
int tri_i = gl_VertexID % 6;
vec4 va[4];
for (int i=0; i<4; ++i)
{
va[i] = u_mvp * vertex[line_i+i];
va[i].xyz /= va[i].w;
va[i].xy = (va[i].xy + 1.0) * 0.5 * u_resolution;
}
vec2 v_line = normalize(va[2].xy - va[1].xy);
vec2 nv_line = vec2(-v_line.y, v_line.x);
vec4 pos;
if (tri_i == 0 || tri_i == 1 || tri_i == 3)
{
vec2 v_pred = normalize(va[1].xy - va[0].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_pred.y, v_pred.x));
pos = va[1];
pos.xy += v_miter * u_thickness * (tri_i == 1 ? -0.5 : 0.5) / dot(v_miter, nv_line);
}
else
{
vec2 v_succ = normalize(va[3].xy - va[2].xy);
vec2 v_miter = normalize(nv_line + vec2(-v_succ.y, v_succ.x));
pos = va[2];
pos.xy += v_miter * u_thickness * (tri_i == 5 ? 0.5 : -0.5) / dot(v_miter, nv_line);
}
pos.xy = pos.xy / u_resolution * 2.0 - 1.0;
pos.xyz *= pos.w;
gl_Position = pos;
}
)";
std::string fragShader = R"(
#version 460
out vec4 fragColor;
void main()
{
fragColor = vec4(1.0);
}
)";
// main
GLuint CreateSSBO(std::vector<glm::vec4> &varray)
{
GLuint ssbo;
glGenBuffers(1, &ssbo);
glBindBuffer(GL_SHADER_STORAGE_BUFFER, ssbo );
glBufferData(GL_SHADER_STORAGE_BUFFER, varray.size()*sizeof(*varray.data()), varray.data(), GL_STATIC_DRAW);
return ssbo;
}
int main(void)
{
if ( glfwInit() == 0 )
throw std::runtime_error( "error initializing glfw" );
GLFWwindow *window = glfwCreateWindow( 800, 600, "GLFW OGL window", nullptr, nullptr );
if (window == nullptr)
{
glfwTerminate();
throw std::runtime_error("error initializing window");
}
glfwMakeContextCurrent(window);
if (glewInit() != GLEW_OK)
throw std::runtime_error("error initializing glew");
OpenGL::CContext::TDebugLevel debug_level = OpenGL::CContext::TDebugLevel::all;
OpenGL::CContext context;
context.Init( debug_level );
GLuint program = OpenGL::CreateProgram(vertShader, fragShader);
GLint loc_mvp = glGetUniformLocation(program, "u_mvp");
GLint loc_res = glGetUniformLocation(program, "u_resolution");
GLint loc_thi = glGetUniformLocation(program, "u_thickness");
glUseProgram(program);
glUniform1f(loc_thi, 20.0);
GLushort pattern = 0x18ff;
GLfloat factor = 2.0f;
std::vector<glm::vec4> varray;
varray.emplace_back(glm::vec4(0.0f, -1.0f, 0.0f, 1.0f));
varray.emplace_back(glm::vec4(1.0f, -1.0f, 0.0f, 1.0f));
for (int u=0; u <= 90; u += 10)
{
double a = u*M_PI/180.0;
double c = cos(a), s = sin(a);
varray.emplace_back(glm::vec4((float)c, (float)s, 0.0f, 1.0f));
}
varray.emplace_back(glm::vec4(-1.0f, 1.0f, 0.0f, 1.0f));
for (int u = 90; u >= 0; u -= 10)
{
double a = u * M_PI / 180.0;
double c = cos(a), s = sin(a);
varray.emplace_back(glm::vec4((float)c-1.0f, (float)s-1.0f, 0.0f, 1.0f));
}
varray.emplace_back(glm::vec4(1.0f, -1.0f, 0.0f, 1.0f));
varray.emplace_back(glm::vec4(1.0f, 0.0f, 0.0f, 1.0f));
GLuint ssbo = CreateSSBO(varray);
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, ssbo);
GLsizei N = (GLsizei)varray.size() - 2;
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glm::mat4 project(1.0f);
int vpSize[2]{0, 0};
while (!glfwWindowShouldClose(window))
{
int w, h;
glfwGetFramebufferSize(window, &w, &h);
if (w != vpSize[0] || h != vpSize[1])
{
vpSize[0] = w; vpSize[1] = h;
glViewport(0, 0, vpSize[0], vpSize[1]);
float aspect = (float)w/(float)h;
project = glm::ortho(-aspect, aspect, -1.0f, 1.0f, -10.0f, 10.0f);
glUniform2f(loc_res, (float)w, (float)h);
}
glClear(GL_COLOR_BUFFER_BIT);
glm::mat4 modelview1( 1.0f );
modelview1 = glm::translate(modelview1, glm::vec3(-0.6f, 0.0f, 0.0f) );
modelview1 = glm::scale(modelview1, glm::vec3(0.5f, 0.5f, 1.0f) );
glm::mat4 mvp1 = project * modelview1;
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glUniformMatrix4fv(loc_mvp, 1, GL_FALSE, glm::value_ptr(mvp1));
glDrawArrays(GL_TRIANGLES, 0, 6*(N-1));
glm::mat4 modelview2( 1.0f );
modelview2 = glm::translate(modelview2, glm::vec3(0.6f, 0.0f, 0.0f) );
modelview2 = glm::scale(modelview2, glm::vec3(0.5f, 0.5f, 1.0f) );
glm::mat4 mvp2 = project * modelview2;
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
glUniformMatrix4fv(loc_mvp, 1, GL_FALSE, glm::value_ptr(mvp2));
glDrawArrays(GL_TRIANGLES, 0, 6*(N-1));
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwTerminate();
return 0;
}
I've been trying OpenGL recently and am stuck again on an issue.
If I set colors via uniforms in my program, I can draw multiple vertex arrays with any color of my choice. But passing two buffers to a vertex array object results in weird coloration, where location 0 is the vertex position and location 1 is the color.
My main function :
int main(){
Window window(960,540);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
Reader read1("src/shaders/test.vert");
Reader read2("src/shaders/test.frag");
char * r1 = read1.getData();
char * r2 = read2.getData();
GLfloat vert[] = {
0, 0, 0,
0, 3, 0,
8, 3, 0,
8, 0, 0
};
GLushort indices[] = {
0,1,2,
2,3,0
};
GLfloat colors[] = {
1, 0, 1, 1,
1, 0, 1, 1,
1, 0, 1, 1,
1, 0, 1, 1,
};
VertexArray vao;
Buffer* vbo = new Buffer(vert, 4 * 4, 3);
vao.addBuffer(vbo, 0);
vao.addBuffer(new Buffer(colors,4 * 4 , 4), 1);
indexBuffer ibo(indices, 6);
Shader shader(r1, r2);
shader.enable();
shader.setUniformMat4("pr_matrix", mat4::orthographic(0.0f, 16.0f, 0.0f, 9.0f, -1.0f, 1.0f));
shader.setUniformMat4("ml_matrix", mat4::translation(vec3(4, 3, 0)));
shader.setUniform2f("light_pos", vec2(8.0f, 4.5f));
shader.setUniform4f("colour", vec4(0.2, 0.3, 0.8, 1));
while (!window.closed()){
window.clear();
double x, y;
x = window.getX();
y = window.getY();
shader.setUniform2f("light_pos", vec2((float)((x)*16.0f / 960.0f), (float)(9 - 9 * (y) / 540.0f)));
vao.bind();
ibo.bind();
shader.setUniform4f("colour", vec4(0.2, 0.3, 0.8, 1));
shader.setUniformMat4("ml_matrix", mat4::translation(vec3(4, 3, 0)));
glDrawElements(GL_TRIANGLES, ibo.getCount(), GL_UNSIGNED_SHORT, 0);
ibo.unbind();
vao.unbind();
window.update();
}
return 0;
}
My vertex shaders :
#version 410 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec4 color;
uniform mat4 pr_matrix ;
uniform mat4 vw_matrix = mat4(1.0f);
uniform mat4 ml_matrix = mat4(1.0f);
out DATA{
vec4 position;
vec4 color;
} vs_out;
out vec4 pos;
void main(){
gl_Position = pr_matrix * vw_matrix * ml_matrix * vec4(position,1) ;
vs_out.position = ml_matrix * vec4(position,1);
vs_out.color = color;
}
My fragment shaders :
#version 410 core
layout(location = 0) out vec4 color ;
uniform vec4 colour;
uniform vec2 light_pos;
in DATA{
vec4 position;
vec4 color;
} fs_in;
void main(){
float intensity = 1.0f / length(fs_in.position.xy - light_pos);
//color = fs_in.color * intensity;
color = fs_in.color * intensity;
}
My buffer class in case its needed to be corrected:
Buffer::Buffer(GLfloat *data, GLsizei count, GLuint compCountExt) : compCount (compCountExt) {
glGenBuffers(1, &bufferId);
glBindBuffer(GL_ARRAY_BUFFER,bufferId);
glBufferData(GL_ARRAY_BUFFER, count* sizeof(GLfloat), data, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Buffer::bind() const {
glBindBuffer(GL_ARRAY_BUFFER, bufferId);
}
void Buffer::unbind() const {
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
EDIT:
Code of the VertexArray class:
VertexArray::VertexArray(){
glGenVertexArrays(1,&arrayID);
}
void VertexArray::bind() const{
glBindVertexArray(arrayID);
}
void VertexArray::unbind() const{
glBindVertexArray(0);
}
VertexArray::~VertexArray(){
}
void VertexArray::addBuffer(Buffer* buffer, GLuint index){
bind();
glBindBuffer(GL_ARRAY_BUFFER, arrayID);
glEnableVertexAttribArray(index);
glVertexAttribPointer(index, buffer->getComCount(), GL_FLOAT, GL_FALSE, 0, 0);
buffer->unbind();
unbind();
}
There are calls to glVertexAttribPointer in this class.
glVertexAttribPointer refers to the currently bound array buffer. This means you have to bind the array buffer before you use glVertexAttribPointer:
void VertexArray::addBuffer(Buffer* buffer, GLuint index){
bind();
// glBindBuffer(GL_ARRAY_BUFFER, arrayID); <---- skip
buffer->bind(); // <---- bind the array buffer
glEnableVertexAttribArray(index);
glVertexAttribPointer(index, buffer->getComCount(), GL_FLOAT, GL_FALSE, 0, 0);
buffer->unbind();
unbind();
}
See OpenGL 4.6 Specification - 10.3.9 Vertex Arrays in Buffer Objects:
A buffer object binding point is added to the client state associated with each
vertex array index. The commands that specify the locations and organizations of vertex arrays copy the buffer object name that is bound to ARRAY_BUFFER to
the binding point corresponding to the vertex array index being specified. For example, the VertexAttribPointer command copies the value of ARRAY_BUFFER_BINDING.
For everyone unfamiliar: OpenGL instanced drawing is where many objects are drawn with one draw call, so glDrawArrays is only called once for a thousand objects on the screen instead of once for every object.
Now the question is: how do I implement instanced rendering in OpenGL 3 for objects whose vertices change constantly? Creating an array, or a position in the vertex shader dedicated specifically to where the objects are, won't work, as I'm dealing with a constantly changing vector of objects that shift coordinates at different velocities every frame.
The header for the object class I'm working with, and the vertex shader I have, are described below for reference.
//CLASS
class Laser {
public:
GLfloat x, y, xVelocity, yVelocity;
GLuint texture;
GLfloat angle;
GLfloat velocity;
GLfloat width, height;
GLfloat drawWidth = 16;
GLfloat drawHeight = 16;
GLfloat damage;
GLint actsToDissapear = -1;
GLint actsExisting = 0;
GLboolean expired = false;
GLboolean isRotated = false;
GLboolean variableColor = false;
glm::vec3 color;
std::string type = "Laser";
Laser(GLfloat damage, GLfloat width, GLfloat height, GLuint texture, GLfloat x, GLfloat y, GLfloat xVelocity, GLfloat yVelocity, GLfloat drawWidth, GLfloat drawHeight, GLfloat actsToDissapear, GLboolean isRotated, GLfloat angle, GLboolean variableColor, glm::vec3 color);
virtual void draw(SpriteRenderer* s);
virtual void move(Rachel* player);
};
//VERTEX SHADER
#version 330 core
layout (location = 0) in vec4 vertex;
uniform mat4 model;
uniform mat4 projection;
out vec2 TexCoords;
void main() {
TexCoords = vec2(vertex.z, vertex.w);
gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
The concept you're looking for is the attribute divisor. See glVertexAttribDivisor.
In a few words: you change your model matrix from a uniform to an instanced attribute that's read from a buffer. Each frame you update that buffer with the new positions of each instance. One thing to consider when implementing this is to use a (vec3 offset, quat4 orientation) representation for the model matrix in order to reduce the number of consumed attributes by half. Also, depending on the exact problem you have at hand, you can update that buffer directly on the GPU with compute shaders.
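To illustrate the idea (this is a hedged sketch, not a drop-in for the Laser class: lasers, instanceVbo, the 6-vertex quad at attribute location 0 and the offset at location 1 are all assumptions for illustration), a per-instance vec2 offset that is re-uploaded every frame and stepped once per instance by the divisor could look like this:
// Every frame, with the quad's VAO bound:
std::vector<glm::vec2> offsets;
offsets.reserve(lasers.size());
for (const Laser& l : lasers)
    offsets.push_back(glm::vec2(l.x, l.y));
glBindBuffer(GL_ARRAY_BUFFER, instanceVbo);
// Orphan and refill the buffer, since the positions change every frame anyway.
glBufferData(GL_ARRAY_BUFFER, offsets.size() * sizeof(glm::vec2), offsets.data(), GL_STREAM_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(glm::vec2), (void*)0);
glVertexAttribDivisor(1, 1); // advance this attribute once per instance
// One draw call for all lasers.
glDrawArraysInstanced(GL_TRIANGLES, 0, 6, (GLsizei)offsets.size());
In the vertex shader the offset would then be a second input (e.g. layout (location = 1) in vec2 offset;) added to the quad's position before applying the projection.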
Here's a code example of what I think you're looking for. I used instanced rendering for my particle system; it supports textures, colors and movement. It works both on Android OpenGL ES and Windows OpenGL. This code requires some work to run, but it should be fairly easy to get going.
#include "ParticleSystem.h"
#include "Engine.h"
#include "Transform.h"
#include "Shader.h"
#include "Texture.h"
#include "Mesh.h"
#include "ShaderHandler.h"
ParticleSystem::ParticleSystem()
{
}
ParticleSystem::~ParticleSystem()
{
shader = nullptr;
texture = nullptr;
glDeleteVertexArrays(1, &vertexArrayObject);
}
void ParticleSystem::init(Engine * engine, float size, Texture * texture, float maxVelocity, bool gravity)
{
this->maxVelocity = maxVelocity;
this->gravity = gravity;
this->size = size;
vertex =
{
-size, -size, 0.0f,
-size, size, 0.0f,
size, size, 0.0f,
size, -size, 0.0f
};
indices =
{
1, 0, 2, 3
};
this->shader = engine->getShaderHandler()->loadShader("res/shaders/texturedInstancedShader");
this->texture = texture;
glGenVertexArrays(1, &this->vertexArrayObject);
glBindVertexArray(this->vertexArrayObject);
glGenBuffers(ParticleSystem::NUM_BUFFERS, this->vertexArrayBuffer);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->VERTEX_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * this->vertex.size(), &this->vertex[0], GL_STATIC_DRAW); //send model to GPU
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->TEXTURE_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec2) * this->texCoords.size(), &this->texCoords[0], GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->vertexArrayBuffer[this->INDEX_VB]);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(unsigned int) * indices.size(), &this->indices[0], GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * this->positions.size(), NULL, GL_STREAM_DRAW); //NULL (empty) buffer
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->COLOR_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec4) * this->colors.size(), NULL, GL_STREAM_DRAW); //NULL (empty) buffer
glBindVertexArray(0);
}
void ParticleSystem::createPoint(float pps, float deltaTime, glm::vec3 position, float maxLife, glm::vec4 color, glm::vec3 velocity)
{
Particle particle;
float amountPerSecond = pps * deltaTime;
for (float i = 0; i < amountPerSecond; i++)
{
particle.life = (rand() % static_cast<int>(maxLife * 100)) / 100.f;
particle.velocity =
{
((rand() % 200 / 100.f) - 1.f) * velocity.x,
((rand() % 200 / 100.f) - 1.f) * velocity.y,
((rand() % 200 / 100.f) - 1.f) * velocity.z
};
particles.emplace_back(particle);
positions.emplace_back(position);
colors.emplace_back(color);
}
}
void ParticleSystem::draw(glm::mat4 view)
{
if (particles.size() > 0)
{
Transform transform;
this->shader->bind();
this->shader->loadTransform(transform, view);
this->shader->loadInt(U_TEXTURE0, 0);
this->texture->bind(0);
glBindVertexArray(vertexArrayObject);
glVertexAttribDivisor(0, 0);
glVertexAttribDivisor(1, 1);
glVertexAttribDivisor(2, 1);
glVertexAttribDivisor(3, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->VERTEX_VB]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->POSITION_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec3) * positions.size(), &positions[0], GL_STREAM_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->COLOR_VB]);
glBufferData(GL_ARRAY_BUFFER, sizeof(glm::vec4) * colors.size(), &colors[0], GL_STREAM_DRAW);
glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(3);
glBindBuffer(GL_ARRAY_BUFFER, this->vertexArrayBuffer[this->TEXTURE_VB]);
glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, 0, (void*)0);
glDrawElementsInstanced(GL_TRIANGLE_STRIP, indices.size(), GL_UNSIGNED_INT, 0, positions.size());
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glBindVertexArray(0);
}
}
void ParticleSystem::update(float deltaTime)
{
for (std::size_t i = 0; i < particles.size(); i++)
{
particles[i].life -= (1.f * deltaTime); //decrease life with 1 per second
if (particles[i].life <= 0.f) //dead
{
particles.erase(particles.begin() + i);
colors.erase(colors.begin() + i);
positions.erase(positions.begin() + i);
continue;
}
if (this->gravity == true)
{
if (particles[i].velocity.y > -maxVelocity)
{
particles[i].velocity.y -= maxVelocity * deltaTime; //1 second to reach maximum velocity
}
else
{
particles[i].velocity.y = -maxVelocity;
}
}
positions[i] += (particles[i].velocity * deltaTime);
}
}
Here's the shader:
vertex shader:
#version 330 core
layout(location = 0) in vec3 vertex;
layout(location = 1) in vec3 positions;
layout(location = 2) in vec4 colors;
layout(location = 3) in vec2 texCoords;
out vec2 texCoord;
out vec4 color;
uniform mat4 transform;
void main()
{
color = colors;
texCoord = texCoords;
gl_Position = transform * vec4(vertex + positions, 1.0);
}
fragment shader:
#version 330 core
in vec4 color;
in vec2 texCoord;
out vec4 colors;
uniform sampler2D texture0;
void main()
{
vec4 texel = texture2D(texture0, texCoord);
if (texel.a <= 0.5)
{
discard;
}
colors = color * texel;
}