OpenGL - GL_LINE_STRIP acts like GL_LINE_LOOP - c++

I'm using OpenGL 3.3 with GLFW.
The problem is that GL_LINE_STRIP and GL_LINE_LOOP give the same result.
Here is the array of 2D coordinates:
GLfloat vertices[] =
{
    0, 0,
    1, 1,
    1, 2,
    2, 2,
    3, 1,
};
The attribute pointer:
// Position attribute 2D
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
And finally:
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices)/4);
Vertex shader:
#version 330 core
layout (location = 0) in vec2 position;
layout (location = 1) in vec3 color;
out vec3 ourColor;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
    gl_Position = projection * view * model * vec4(position, 0.0f, 1.0f);
    ourColor = color;
}
Fragment shader:
#version 330 core
in vec3 ourColor;
out vec3 color;
void main()
{
    color = vec3(ourColor);
}
The color attribute is disabled (the lines are black and visible).
Any idea?

You have only 5 pairs of floats, so 5 vertices. The total size of your array is 10 floats of 4 bytes each, so 40 bytes.
Your count expression, sizeof(vertices)/4, therefore gives 10. glDrawArrays then reads 10 vertices: your 5 plus 5 more past the end of the buffer, which on many drivers come back as (0, 0), drawing the strip back to the start so GL_LINE_STRIP looks like GL_LINE_LOOP. sizeof(vertices) / (sizeof(vertices[0]) * dimensionality), with dimensionality 2 here, is the correct expression there.
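A minimal sketch of the corrected draw call (vertexCount is a name introduced here for illustration):
GLsizei vertexCount = sizeof(vertices) / (sizeof(vertices[0]) * 2); // 10 floats / 2 per vertex = 5
glDrawArrays(GL_LINE_STRIP, 0, vertexCount);
With a count of 5, the strip ends at the last real vertex instead of reading past the end of the buffer.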

Related

GLSL Geometry Shader causes "No vertex shader bound at draw" in RenderDoc

I am trying to get a basic geometry shader to work, but I am completely failing. After checking numerous resources, I still cannot find a solution to my problem.
Here is my code for my vertex, geometry, and fragment shaders.
Vertex Shader:
#version 330 core
// Vertex Shader Inputs
layout (location = 0) in vec3 Pos;
layout (location = 1) in vec3 Norm;
layout (location = 2) in vec3 Color;
// Vertex to Fragment Shader Outputs
out DATA {
    vec3 vsPos;
    vec3 vsNorm;
    vec4 vsColor;
} data_out;
// Main.cpp Imports
uniform mat4 camMatrix; // viewProjection Matrix
uniform mat4 model;
void main()
{
    vec3 vsPos = vec3(model * vec4(Pos, 1.0f));
    vec3 vsNorm = mat3(transpose(inverse(model))) * Norm; // Normal vector correction
    vec4 vsColor = vec4(Color, 1.0f);
    gl_Position = camMatrix * vec4(vsPos, 1.0f);
}
Geometry Shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
out vec3 gsPos;
out vec3 gsNorm;
out vec3 gsColor;
in DATA {
    vec3 vsPos;
    vec3 vsNorm;
    vec4 vsColor;
} data_in[];
uniform mat4 camMatrix;
void main()
{
    for (int i = 0; i < 3; i++)
    {
        gsPos = data_in[i].vsPos;
        gsNorm = data_in[i].vsNorm;
        gsColor = data_in[i].vsColor;
        gl_Position = camMatrix * vec4(data_in[i].vsPos, 1.0f);
        EmitVertex();
    }
    EndPrimitive();
}
Fragment Shader:
#version 330 core
out vec4 FragColor;
// Fragment Shader Inputs
in vec3 gsPos;
in vec3 gsNorm;
in vec4 gsColor;
// Fragment Shader Uniforms
uniform sampler2D diffuse0;
uniform sampler2D specular0;
uniform vec4 lightColor;
uniform vec3 lightPos;
uniform vec3 camPos;
vec4 pointLight()
{
    vec3 lightVec = (lightPos - vsPos);
    // intensity of light with respect to distance
    float dist = length(lightVec);
    float a = 0.7;
    float b = 0.4;
    float c = 1.0;
    float inten = 1.0f / (a * dist * dist + b * dist + c);
    // ambient lighting
    float ambient = 0.75f;
    // diffuse lighting
    vec3 fsNorm = normalize(gsNorm);
    vec3 lightDirection = normalize(lightVec);
    float diffuse = max(dot(fsNorm, lightDirection), 0.0f);
    // specular lighting
    float specular = 0.0f;
    if (diffuse != 0.0f)
    {
        float specularLight = 0.50f;
        vec3 viewDirection = normalize(gsNorm - gsPos);
        vec3 halfwayVec = normalize(viewDirection + lightDirection);
        float specAmount = pow(max(dot(fsNorm, halfwayVec), 0.0f), 32);
        specular = specAmount * specularLight;
    }
    return inten * (gsColor * (diffuse + ambient) + gsColor * specular) * lightColor;
}
void main()
{
    // outputs final color
    FragColor = pointLight();
}
My mesh generation function:
void genMesh()
{
    VAO.Bind();
    VBO VBO(vtx);
    EBO EBO(idx);
    VAO.LinkAttrib(VBO, 0, 3, GL_FLOAT, sizeof(Vertex), (void*)0);
    VAO.LinkAttrib(VBO, 1, 3, GL_FLOAT, sizeof(Vertex), (void*)(3 * sizeof(float)));
    VAO.LinkAttrib(VBO, 2, 4, GL_FLOAT, sizeof(Vertex), (void*)(6 * sizeof(float)));
    VAO.Unbind();
    VBO.Unbind();
    EBO.Unbind();
};
My mesh draw function:
void Mesh::Draw(Shader& shader, Camera& camera)
{
    shader.Activate();
    VAO.Bind();
    // Take care of the camera matrix
    glUniform3f(glGetUniformLocation(shader.ID, "camPos"),
                camera.Position.x,
                camera.Position.y,
                camera.Position.z);
    camera.Matrix(shader, "camMatrix");
    // Draw the actual mesh
    glDrawElements(GL_TRIANGLES, idx.size() * sizeof(GLuint), GL_UNSIGNED_INT, 0);
};
I call my mesh generation function outside of the main while loop, then I draw the mesh in my main while loop.
Debugging my program through RenderDoc gives me the error, "No vertex shader bound at draw!" Without the geometry shader (keeping everything else roughly the same), I do not get any errors in RenderDoc. I tried updating my graphics drivers, but I am just getting the same error. Please help me, I feel like I am losing my mind.
In the fragment shader, gsColor is declared as a vec4, but in the geometry shader it is declared as a vec3 (while being assigned the vec4 data_in[i].vsColor). Because of that mismatch the program fails to compile and link, so no program ends up bound at draw time, which RenderDoc reports as "No vertex shader bound at draw!".
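A sketch of the fix, changing only the geometry shader's output declarations so all three stage interfaces agree:
out vec3 gsPos;
out vec3 gsNorm;
out vec4 gsColor; // was vec3; must match data_in[i].vsColor and the fragment shader's "in vec4 gsColor"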

OpenGL rectangle rendering triangle instead?

So I'm trying to render a rectangle in OpenGL using index buffers, however instead I'm getting a triangle with one vertex at the origin (even though no vertex of my rectangle is supposed to go at the origin).
void Renderer::drawRect(int x,int y,int width, int height)
{
    // (Ignoring method arguments for debugging)
    float vertices[12] = {200.f, 300.f, 0.f,
                          200.f, 100.f, 0.f,
                          600.f, 100.f, 0.f,
                          600.f, 300.f, 0.f};
    unsigned int indices[6] = {0, 1, 3, 1, 2, 3};
    glBindVertexArray(this->flat_shape_VAO);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, this->element_buffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_DYNAMIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, this->render_buffer);
    glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_DYNAMIC_DRAW);
    glEnableVertexAttribArray(0);
    glUseProgram(this->shader_program);
    glUniformMatrix4fv(this->model_view_projection_uniform, 1, GL_FALSE, glm::value_ptr(this->model_view_projection_mat));
    glUniform3f(this->color_uniform, (float)this->color.r, (float)this->color.g, (float)this->color.b);
    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, nullptr);
}
My projection matrix is working fine; I can still render a triangle at the correct screen coords. I suspect maybe I did the index buffering wrong? Transformation matrices also work fine, at least on my triangles.
Edit:
The VAO's attributes are set up in the class constructor with glVertexAttribPointer();
Edit 2:
I disabled shaders completely and something interesting happened.
Here is the shader source code:
(vertex shader)
#version 330 core
layout (location = 0) in vec3 aPos;
uniform mat4 mvp;
uniform vec3 aColor;
out vec3 color;
void main()
{
    gl_Position = mvp * vec4(aPos, 1.0);
    color = aColor;
}
(fragment shader)
#version 330 core
in vec3 color;
out vec4 FragColor;
void main()
{
    FragColor = vec4(color, 1.0f);
}
My projection matrix shouldn't work with shaders disabled, yet I still see a triangle rendering on the screen?
What is the stride argument of glVertexAttribPointer? stride specifies the byte offset between consecutive generic vertex attributes. In your case it should be 0 or 12 (3 * sizeof(float)), but judging from your images it is 24: the rendered triangle has the 1st (200, 300) and 3rd (600, 100) vertices plus one more vertex at (0, 0), which is exactly what you get when every second vertex is skipped and the reads run past the end of the buffer. Change
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), nullptr);
to
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), nullptr);
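Since the positions are tightly packed 3-float vertices, a stride of 0 is equivalent; a sketch:
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, nullptr); // 0 = tightly packed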

Vector 4 not representing the colors of all the vertices

I'm trying to have 4 integers represent the colors of all the vertices in a VBO by setting the stride on the color vertex attribute pointer; however, it seems to take the value only once for the color and, as a result, assigns the rest of the vertices black, as in the attached picture. The expected result is that all the vertices will be white.
Here are the relevant pieces of code:
int triangleData[18] =
{
    2147483647, 2147483647, 2147483647, 2147483647, // opaque white
    0, 100,     // top
    100, -100,  // bottom right
    -100, -100  // bottom left
};
unsigned int colorVAO, colorVBO;
glGenVertexArrays(1, &colorVAO);
glGenBuffers(1, &colorVBO);
glBindVertexArray(colorVAO);
glBindBuffer(GL_ARRAY_BUFFER, colorVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(triangleData), triangleData, GL_STATIC_DRAW);
glVertexAttribPointer(0, 2, GL_INT, GL_FALSE, 2 * sizeof(int), (void*)(4*sizeof(int)));
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 4, GL_INT, GL_TRUE, 0, (void*)0);
glEnableVertexAttribArray(1);
Vertex shader:
#version 330 core
layout (location = 0) in vec2 aPos;
layout (location = 1) in vec4 aColor;
out vec4 Color;
uniform mat4 model;
uniform mat4 view;
uniform mat4 ortho;
void main()
{
    gl_Position = ortho * view * model * vec4(aPos, 1.0, 1.0);
    Color = aColor;
}
Fragment shader:
#version 330 core
out vec4 FragColor;
in vec4 Color;
void main()
{
    FragColor = Color;
}
From the documentation of glVertexAttribPointer:
stride
Specifies the byte offset between consecutive generic vertex attributes. If stride is 0, the generic vertex attributes are understood to be tightly packed in the array.
Setting the stride to 0 does not mean that the same data is read for each vertex; it means that the attribute data is packed one value after the other in the buffer.
If you want all the vertices to use the same data, you can either disable the attribute and use glVertexAttrib, or you can use the separate vertex format (available starting from OpenGL 4.3 or with ARB_vertex_attrib_binding) similar to:
glBindVertexBuffer(index, buffer, offset, 0);
where a stride of 0 really means no stride.
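A minimal sketch of the glVertexAttrib route, assuming the color attribute stays at location 1 as in the question:
glDisableVertexAttribArray(1);               // stop reading per-vertex color from the buffer
glVertexAttrib4f(1, 1.0f, 1.0f, 1.0f, 1.0f); // constant opaque white for every vertex
With the array disabled, every vertex receives the current attribute value set by glVertexAttrib4f.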

Issue with passing integer vertex attributes with "in" keyword

I'm working on bone animation. I have a vertex struct that basically looks like
struct MeshVertex
{
    glm::vec3 pos;
    glm::vec3 normal;
    glm::vec2 tex;
    glm::vec3 tangent;
    glm::vec3 bitangent;
    uint32_t ids[4] = {};
    float weights[4] = {};
    void print() const;
};
The mesh is a basic cube with one bone. Therefore ids = {0,0,0,0} and weights = {1.0f,0.0f,0.0f,0.0f} for every single vertex. In my mesh class I have a static function Mesh::genFormat() that handles attributes. vao is a static int in the mesh class and for_i is just a convenient macro I use to do for loops. Note that I correctly use glVertexArrayAttribIFormat.
Mesh::Mesh(const std::vector<MeshVertex>& vertices, const std::vector<uint>& indices, const std::vector<Texture>& textures)
{
    m_textures = textures;
    m_num_indices = indices.size();
    // create vertex and index buffers
    glCreateBuffers(1, &m_vbo);
    glCreateBuffers(1, &m_ibo);
    glNamedBufferData(m_vbo, sizeof(MeshVertex) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
    glNamedBufferData(m_ibo, sizeof(uint) * indices.size(), &indices[0], GL_STATIC_DRAW);
}
void Mesh::genFormat()
{
    glCreateVertexArrays(1, &vao);
    for_i(7) { glEnableVertexArrayAttrib(vao, i); }
    glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, false, offsetof(MeshVertex, pos));
    glVertexArrayAttribFormat(vao, 1, 3, GL_FLOAT, false, offsetof(MeshVertex, normal));
    glVertexArrayAttribFormat(vao, 2, 2, GL_FLOAT, false, offsetof(MeshVertex, tex));
    glVertexArrayAttribFormat(vao, 3, 3, GL_FLOAT, false, offsetof(MeshVertex, tangent));
    glVertexArrayAttribFormat(vao, 4, 3, GL_FLOAT, false, offsetof(MeshVertex, bitangent));
    glVertexArrayAttribIFormat(vao, 5, 4, GL_UNSIGNED_INT, offsetof(MeshVertex, ids));
    glVertexArrayAttribFormat(vao, 6, 4, GL_FLOAT, false, offsetof(MeshVertex, weights));
    for_i(7) { glVertexArrayAttribBinding(vao, i, 0); }
    glBindVertexArray(0);
}
The following GLSL won't render anything.
#version 460 core
layout(location = 0) in vec3 Pos;
layout(location = 1) in vec3 Normal;
layout(location = 2) in vec2 Tex;
layout(location = 3) in vec3 Tan;
layout(location = 4) in vec3 BiTan;
layout(location = 5) in uvec4 BoneIds;
layout(location = 6) in vec4 Weights;
out vec3 normal;
out vec2 tex;
layout(binding = 2, std140) uniform Camera
{
    mat4 VP;
    vec4 cpos;
};
uniform mat4 node;
uniform mat4 bones_inverse_bind_mesh_parent[50];
void main()
{
    tex = Tex;
    mat4 W = mat4(0.0f);
    if (Weights[0] != 0.0f)
    {
        for (uint i = 0; i < 4; i++)
            W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
        W = node * W;
    }
    else
        W = node;
    gl_Position = VP * W * vec4(Pos, 1.0);
}
Since BoneIds[i] is always zero, if I replace
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
with
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[0]);
the result should be unchanged. My matrix transforms are currently a bit off (something to fix later), but now the cube renders fine. So there is something wrong with BoneIds. After bashing my head against the wall on this for a while, I instead replaced
layout(location = 5) in uvec4 BoneIds;
with
layout(location = 5) varying uvec4 BoneIds;
after seeing some old GLSL online, and now everything works. What I don't understand is why. I've seen plenty of GLSL code on the internet work with integer attributes using the in keyword.
UPDATE:
If I replace glVertexArrayAttribIFormat in Mesh::genFormat() with
glVertexArrayAttribFormat(vao, 5, 4, GL_UNSIGNED_INT, false, offsetof(MeshVertex, ids));
in C++ and
layout(location = 5) in vec4 BoneIds;
in GLSL, and cast the bone ids from float to int in the shader code, the code also works.
Okay, I solved the issue, even though I don't quite understand how this fixes the problem. My preferred graphics processor was set to auto; when I forced it to use the NVIDIA processor over my integrated graphics, everything works out fine.
Update:
I think it is as simple as my Intel integrated graphics supporting only OpenGL 4.4, while glVertexArrayAttribIFormat came about in OpenGL 4.5.
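A small sketch of a startup check that would catch this (plain GL version queries; the error message is illustrative):
GLint major = 0, minor = 0;
glGetIntegerv(GL_MAJOR_VERSION, &major);
glGetIntegerv(GL_MINOR_VERSION, &minor);
// glVertexArrayAttribIFormat and the other DSA vertex-format calls need 4.5+
if (major < 4 || (major == 4 && minor < 5))
    fprintf(stderr, "OpenGL 4.5 not available; glVertexArrayAttribIFormat will not exist\n"); // needs <cstdio>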

OpenGL - vertex color in shader gets swapped

I'm trying to send colors to the shader, but the colors get swapped:
I send 0xFF00FFFF (magenta) but I get 0xFFFF00FF (yellow) in the shader.
From experimenting, I think the bytes are being reordered.
My vertex shader:
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
out DATA
{
    vec4 position;
    vec3 normal;
    vec4 color;
} vs_out;
void main()
{
    gl_Position = pr_matrix * vw_matrix * ml_matrix * position;
    vs_out.position = position;
    vs_out.color = color;
    vs_out.normal = normalize(mat3(ml_matrix) * normal);
}
And the fragment shader:
#version 330 core
layout(location = 0) out vec4 out_color;
in DATA
{
    vec3 position;
    vec3 normal;
    vec4 color;
} fs_in;
void main()
{
    out_color = fs_in.color;
    //out_color = vec4(fs_in.color.y, 0, 0, 1);
    //out_color = vec4((fs_in.normal + 1 / 2.0), 1.0);
}
Here is how I set up the mesh:
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLint color; // GLuint tested
};
std::vector<Vertex_Color> verts = std::vector<Vertex_Color>();
[loops]
int color = 0xFF00FFFF; // magenta, uint tested
verts.push_back({ vert, normal, color });
glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex_Color), &verts[0], GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, normal)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
I can't figure out what's wrong. Thanks in advance.
Your code is reinterpreting an int as 4 consecutive bytes in memory. The internal encoding of int (and all other types) is machine-specific. In your case, you have 32-bit integers stored in little-endian byte order, which is the typical case for PC environments: the least significant byte comes first, so 0xFF00FFFF lands in memory as the bytes FF FF 00 FF, which the attribute reads as R = FF, G = FF, B = 00, A = FF, i.e. yellow.
You could use an array like GLubyte color[4] to explicitly get a defined memory layout.
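A sketch of that byte-array layout (Vec3 as in the question's struct):
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLubyte color[4]; // always R, G, B, A in memory, independent of endianness
};
// ...
verts.push_back({ vert, normal, { 0xFF, 0x00, 0xFF, 0xFF } }); // magenta
The existing glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, ...) call can stay exactly as it is.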
If you really want to use an integer type, you could send the data as an integer attribute with glVertexAttribIPointer (note the I there) and use unpackUnorm4x8 in the shader to get a normalized float vector. However, that requires at least GLSL 4.10, and might be less efficient than the standard approach.
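A sketch of that integer-attribute variant (packedColor is a name introduced here; requires a GLSL 4.10+ context):
// C++ side: one uint per vertex, via the I variant of the pointer call
glVertexAttribIPointer(2, 1, GL_UNSIGNED_INT, sizeof(Vertex_Color),
    (const GLvoid*)(offsetof(Vertex_Color, color)));
// GLSL side:
// layout(location = 2) in uint packedColor;
// vec4 color = unpackUnorm4x8(packedColor);
// Note: unpackUnorm4x8 takes component x from the least significant byte,
// so the int must be packed as 0xAABBGGRR to come out as (R, G, B, A).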