Names of OpenGL attributes for the vertex shader - C++

I currently load some attributes for my vertex shader in the following way:
glBindBuffer(GL_ARRAY_BUFFER, MeshVBs[eyeNum]->GLBuffer);
UINT stride = sizeof(ovrDistortionVertex);
const VertexAttribDesc VertexDesc[] =
//Name, Size, Type, Normalized, Offset
{ {"Position", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, ScreenPosNDC)},
{"inV", 1, GL_FLOAT, false, offsetof(ovrDistortionVertex, VignetteFactor)},
{"inTexCoord0", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesR)},
{"inTexCoord1", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesG)},
{"inTexCoord2", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesB)} };
for (size_t i = 0; i < 5; i++)
{
    VertexAttribDesc vad = VertexDesc[i];
    glEnableVertexAttribArray((GLuint)i);
    glVertexAttribPointer((GLuint)i, vad.Size, vad.Type, vad.Normalized, stride, reinterpret_cast<char*>(vad.Offset));
}
This is the vertex shader that is used:
#version 330 core
uniform vec2 EyeToSourceUVScale;
uniform vec2 EyeToSourceUVOffset;
attribute vec2 Position;
attribute float inT;
attribute vec2 inTexCoord0;
attribute vec2 inTexCoord1;
attribute vec2 inTexCoord2;
varying vec4 oPosition;
varying vec2 oTexCoord0;
varying vec2 oTexCoord1;
varying vec2 oTexCoord2;
varying float oVignette;
vec2 TexCoord0 = vec2((inTexCoord0.x), (-inTexCoord0.y));
vec2 TexCoord1 = vec2((inTexCoord1.x), (-inTexCoord1.y));
vec2 TexCoord2 = vec2((inTexCoord2.x), (-inTexCoord2.y));
float Vignette = inT;
vec2 normalizeTexCoord( in vec2 TexCoord )
{
return ( EyeToSourceUVScale*TexCoord) + EyeToSourceUVOffset;
}
void main(){
    oTexCoord0 = normalizeTexCoord( TexCoord0);
    oTexCoord1 = normalizeTexCoord( TexCoord1);
    oTexCoord2 = normalizeTexCoord( TexCoord2);
    oVignette = Vignette;
    gl_Position.xyzw = vec4( Position.xy , 0.500000, 1.00000);
}
That works! But if I rename "inT" in the vertex shader to "inV", nothing works anymore. Any ideas why?
Let me know if you need more information about the context to give an answer.
----SOLUTION----
Before the main loop:
UINT stride = sizeof(ovrDistortionVertex);
const VertexAttribDesc VertexDesc[] =
//Name, Size, Type, Normalized, Offset
{ {"Position", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, ScreenPosNDC)},
{"inVignette", 1, GL_FLOAT, false, offsetof(ovrDistortionVertex, VignetteFactor)},
{"inTexCoord0", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesR)},
{"inTexCoord1", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesG)},
{"inTexCoord2", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesB)} };
for (int i = 0; i < 5; i++)
{
    VertexAttribDesc vad = VertexDesc[i];
    glBindAttribLocation(shader_programm, i, vad.Name);
}
glLinkProgram(shader_programm);
What remained in the main loop:
glBindBuffer(GL_ARRAY_BUFFER, MeshVBs[eyeNum]->GLBuffer);
for (size_t i = 0; i < 5; i++)
{
    VertexAttribDesc vad = VertexDesc[i];
    glEnableVertexAttribArray((GLuint)i);
    glVertexAttribPointer((GLuint)i, vad.Size, vad.Type, vad.Normalized, stride, reinterpret_cast<char*>(vad.Offset));
}
and the new vertex shader (in the fragment shader, varying was likewise changed to in):
#version 330 core
uniform vec2 EyeToSourceUVScale;
uniform vec2 EyeToSourceUVOffset;
in vec2 Position;
in float inVignette;
in vec2 inTexCoord0;
in vec2 inTexCoord1;
in vec2 inTexCoord2;
out vec4 oPosition;
out vec2 oTexCoord0;
out vec2 oTexCoord1;
out vec2 oTexCoord2;
out float oVignette;
vec2 TexCoord0 = vec2((inTexCoord0.x), (-inTexCoord0.y));
vec2 TexCoord1 = vec2((inTexCoord1.x), (-inTexCoord1.y));
vec2 TexCoord2 = vec2((inTexCoord2.x), (-inTexCoord2.y));
float Vignette = inVignette;
vec2 normalizeTexCoord( in vec2 TexCoord )
{
return ( EyeToSourceUVScale*TexCoord) + EyeToSourceUVOffset;
}
void main(){
    oTexCoord0 = normalizeTexCoord( TexCoord0);
    oTexCoord1 = normalizeTexCoord( TexCoord1);
    oTexCoord2 = normalizeTexCoord( TexCoord2);
    oVignette = Vignette;
    gl_Position.xyzw = vec4( Position.xy , 0.500000, 1.00000);
}

That syntax is not valid for a 330 core shader. You need to use the new in and out syntax that GLSL 130 introduced. That means in the vertex shader, inputs (attribute) all become in and outputs (varying) all become out. In the matching fragment shader, the outputs (varying) from the vertex shader are inputs and thus are declared using in.
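A minimal sketch of that conversion, using the names from the shader above:
// vertex shader (GLSL 330 core)
in vec2 Position;      // was: attribute vec2 Position;
out vec2 oTexCoord0;   // was: varying vec2 oTexCoord0;
// matching fragment shader
in vec2 oTexCoord0;    // was: varying vec2 oTexCoord0;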
With all that said, I see nowhere that you actually query the locations of your vertex attributes. Many drivers will automatically assign attribute locations in alphabetical order, so changing inT to inV is going to change its location to come after inTexCoord2.
Since this is a GLSL 3.30 shader, you have the option of binding the attribute locations explicitly in the shader, binding them from the API before linking, or querying them after the fact.
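As a rough sketch of those three options (prog stands for your program object, and location 1 is arbitrary):
// Option 1: explicit location in the GLSL source, no API call needed
//     layout(location = 1) in float inV;
// Option 2: bind from the API, then link
glBindAttribLocation(prog, 1, "inV");
glLinkProgram(prog);
// Option 3: query the driver-assigned location after linking
GLint loc = glGetAttribLocation(prog, "inV");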
To make this work as simply as possible, I would do something like this:
const VertexAttribDesc VertexDesc[] =
//Name, Size, Type, Normalized, Offset
{ {"Position", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, ScreenPosNDC)},
{"inV", 1, GL_FLOAT, false, offsetof(ovrDistortionVertex, VignetteFactor)},
{"inTexCoord0", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesR)},
{"inTexCoord1", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesG)},
{"inTexCoord2", 2, GL_FLOAT, false, offsetof(ovrDistortionVertex, TanEyeAnglesB)} };
for (size_t i = 0; i < 5; i++)
{
    VertexAttribDesc vad = VertexDesc[i];
    glBindAttribLocation(prog, i, vad.Name);
}
Keep in mind, that has to be done before you link your GLSL program. Attribute locations are assigned during the link operation. You can change them later, but you have to re-link the program.

Related

Issue with passing integer vertex attributes with "in" keyword

I'm working on bone animation. I have a vertex struct that basically looks like
struct MeshVertex
{
    glm::vec3 pos;
    glm::vec3 normal;
    glm::vec2 tex;
    glm::vec3 tangent;
    glm::vec3 bitangent;
    uint32_t ids[4] = {};
    float weights[4] = {};
    void print() const;
};
The mesh is a basic cube with one bone. Therefore ids = {0,0,0,0} and weights = {1.0f,0.0f,0.0f,0.0f} for every single vertex. In my mesh class I have a static function Mesh::genFormat() that handles attributes. vao is a static int in the mesh class, and for_i is just a convenience macro I use for loops (a hypothetical definition is sketched below). Note that I correctly use glVertexArrayAttribIFormat.
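(for_i is not shown in the question; a hypothetical definition would be something like this:)
#define for_i(n) for (int i = 0; i < (n); ++i)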
Mesh::Mesh(const std::vector<MeshVertex>& vertices, const std::vector<uint>& indices, const std::vector<Texture>& textures)
{
    m_textures = textures;
    m_num_indices = indices.size();
    // create vertex and index buffers
    glCreateBuffers(1, &m_vbo);
    glCreateBuffers(1, &m_ibo);
    glNamedBufferData(m_vbo, sizeof(MeshVertex) * vertices.size(), &vertices[0], GL_STATIC_DRAW);
    glNamedBufferData(m_ibo, sizeof(uint) * indices.size(), &indices[0], GL_STATIC_DRAW);
}
void Mesh::genFormat()
{
    glCreateVertexArrays(1, &vao);
    for_i(7) { glEnableVertexArrayAttrib(vao, i); }
    glVertexArrayAttribFormat(vao, 0, 3, GL_FLOAT, false, offsetof(MeshVertex, pos));
    glVertexArrayAttribFormat(vao, 1, 3, GL_FLOAT, false, offsetof(MeshVertex, normal));
    glVertexArrayAttribFormat(vao, 2, 2, GL_FLOAT, false, offsetof(MeshVertex, tex));
    glVertexArrayAttribFormat(vao, 3, 3, GL_FLOAT, false, offsetof(MeshVertex, tangent));
    glVertexArrayAttribFormat(vao, 4, 3, GL_FLOAT, false, offsetof(MeshVertex, bitangent));
    glVertexArrayAttribIFormat(vao, 5, 4, GL_UNSIGNED_INT, offsetof(MeshVertex, ids));
    glVertexArrayAttribFormat(vao, 6, 4, GL_FLOAT, false, offsetof(MeshVertex, weights));
    for_i(7) { glVertexArrayAttribBinding(vao, i, 0); }
    glBindVertexArray(0);
}
The following GLSL won't render anything.
#version 460 core
layout(location = 0) in vec3 Pos;
layout(location = 1) in vec3 Normal;
layout(location = 2) in vec2 Tex;
layout(location = 3) in vec3 Tan;
layout(location = 4) in vec3 BiTan;
layout(location = 5) in uvec4 BoneIds;
layout(location = 6) in vec4 Weights;
out vec3 normal;
out vec2 tex;
layout(binding = 2, std140) uniform Camera
{
    mat4 VP;
    vec4 cpos;
};
uniform mat4 node;
uniform mat4 bones_inverse_bind_mesh_parent[50];
void main()
{
    tex = Tex;
    mat4 W = mat4(0.0f);
    if (Weights[0] != 0.0f)
    {
        for (uint i = 0; i < 4; i++)
            W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
        W = node * W;
    }
    else
        W = node;
    gl_Position = VP * W * vec4(Pos, 1.0);
}
Since BoneIds[i] is always zero, if I replace
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[BoneIds[i]]);
with
W = W + (Weights[i] * bones_inverse_bind_mesh_parent[0]);
the result should be unchanged. My matrix transforms are currently a bit off (something to fix later), but now the cube renders fine. So there is something wrong with BoneIds. After bashing my head against the wall on this for a while, I instead replaced
layout(location = 5) in uvec4 BoneIds;
with
layout(location = 5) varying uvec4 BoneIds;
after seeing some old GLSL online, and now everything works. What I don't understand is why. I've seen plenty of GLSL code on the internet work with integer attributes using the in keyword.
UPDATE:
If I replace glVertexArrayAttribIFormat in Mesh::genFormat() with
glVertexArrayAttribFormat(vao, 5, 4, GL_UNSIGNED_INT, false, offsetof(MeshVertex, ids));
in C++ and
layout(location = 5) in vec4 BoneIds;
in GLSL, and cast the bone ids from float to int in the GLSL code, the code also works.
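For reference, a sketch of what that float-based fallback might look like in the shader; the rounding before the uint cast is my own assumption, to guard against float imprecision:
layout(location = 5) in vec4 BoneIds; // delivered as non-normalized floats
// ...
for (uint i = 0; i < 4; i++)
    W = W + (Weights[i] * bones_inverse_bind_mesh_parent[uint(BoneIds[i] + 0.5)]);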
Okay, I solved the issue, even though I don't quite understand how this fixes the problem. My preferred graphics processor was set to auto; when I forced the NVIDIA GPU to be used over my integrated graphics, everything worked fine.
Update:
I think it is as simple as my Intel integrated graphics supporting only OpenGL 4.4, while glVertexArrayAttribIFormat came about in OpenGL 4.5.
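A quick way to check that theory at runtime is to ask the context what the active GPU actually provides; a minimal sketch (the output depends on your driver):
GLint major = 0, minor = 0;
glGetIntegerv(GL_MAJOR_VERSION, &major);
glGetIntegerv(GL_MINOR_VERSION, &minor);
printf("OpenGL %d.%d on %s\n", major, minor, (const char*)glGetString(GL_RENDERER));
// glVertexArrayAttribIFormat requires OpenGL 4.5 (or ARB_direct_state_access)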

glGetUniformLocation returning -1 during transform feedback when it is used

The function returns -1, even though the uniform is defined and used within the vertex shader. I suspect the cause may be that the out attributes are not properly bound to the target buffer (not sure if that is the case). Without this one uniform, most of my values will stay the same.
Drawing the Transform Feedback
/*code included in update*/
glUseProgram(feedbackShader->GetProgram());
glEnable(GL_RASTERIZER_DISCARD);
/*end of code included in update*/
glBindBuffer(GL_ARRAY_BUFFER, particleBuffer[isEvenBuffer]);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, transformBuffer[!isEvenBuffer]);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glEnableVertexAttribArray(3);
glEnableVertexAttribArray(4);
glEnableVertexAttribArray(5);
glVertexAttribPointer(5, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), 0); //Location
glVertexAttribPointer(4, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)12); //Velocity
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)24); //InitLocation
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)36); //InitVelocity
glVertexAttribPointer(3, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)48); //Lifetime
glVertexAttribPointer(2, 1, GL_FLOAT, GL_FALSE, sizeof(Particle), (const GLvoid*)52); //InitLifetime
GLint uniformLocation = glGetUniformLocation(feedbackShader->GetProgram(), "time");
glUniform1f(uniformLocation, msec);
glBeginTransformFeedback(GL_POINTS);
glDrawTransformFeedback(GL_POINTS, transformBuffer[isEvenBuffer]);
glEndTransformFeedback();
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
glDisableVertexAttribArray(3);
glDisableVertexAttribArray(4);
glDisableVertexAttribArray(5);
The Vertex Shader
#version 410
in vec3 inLocation;
in vec3 inVelocity;
in vec3 inInitLocation;
in vec3 inInitVelocity;
in float inLifeTime;
in float inInitlifeTime;
out vec3 outLocation;
out vec3 outVelocity;
out vec3 outInitLocation;
out vec3 outInitVelocity;
out float outLifeTime;
out float outInitlifeTime;
uniform float time;
vec3 Gravity = vec3(0.0f,-0.98f,0.0f);
float dampeningFactor = 0.5;
void main()
{
    outLifeTime = inLifeTime - time;
    if(outLifeTime > 0.0f){
        outVelocity = (inVelocity + Gravity * time) * dampeningFactor;
        outLocation = inLocation + inVelocity * time;
    }else{
        outVelocity = inInitVelocity;
        outLocation = inInitLocation;
        outLifeTime = inInitlifeTime;
    }
    outInitVelocity = inInitVelocity;
    outInitLocation = inInitLocation;
    outInitlifeTime = inInitlifeTime;
}
UPDATE
There were a few extra bits of information you all asked for.
The vertex shader (unchanged from the listing above).
The Feedback Varyings (I was stupid and didn't have this at first, though the same issue remains in my code)
const GLchar* feedbackVaryings[] = {
    "outLocation",
    "outVelocity",
    "outInitLocation",
    "outInitVelocity",
    "outLifeTime",
    "outInitlifeTime"
};
glTransformFeedbackVaryings(feedbackShader->LinkProgram(), 6, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
I also added the glUseProgram call in the "Drawing the Transform Feedback" section.
I assume that the instruction
feedbackShader->LinkProgram()
links the program. But the transform feedback varyings have to be specified before the program is linked.
See OpenGL 4.6 API Core Profile Specification; 7.3.1.1 Naming Active Resources; page 104:
The order of the active resource list is implementation-dependent for all interfaces except for TRANSFORM_FEEDBACK_VARYING. If variables in the TRANSFORM_FEEDBACK_VARYING interface were specified using the TransformFeedbackVaryings command, the active resource list will be arranged in the variable order specified in the most recent call to TransformFeedbackVaryings before the last call to LinkProgram.
This means you first have to attach the compiled vertex shader object to the program object (glAttachShader), then specify the transform feedback varyings (glTransformFeedbackVaryings), and finally link the program (glLinkProgram):
GLuint shader_obj;
GLuint program_obj;
.....
glAttachShader(program_obj, shader_obj);
const GLchar* feedbackVaryings[] = {
    "outLocation",
    "outVelocity",
    "outInitLocation",
    "outInitVelocity",
    "outLifeTime",
    "outInitlifeTime"
};
glTransformFeedbackVaryings(program_obj, 6, feedbackVaryings, GL_INTERLEAVED_ATTRIBS);
glLinkProgram(program_obj);
Further, I recommend using layout qualifiers to define the attribute indices of the vertex shader input variables.
e.g.
layout (location = 0) in vec3 inLocation;
layout (location = 1) in vec3 inVelocity;
layout (location = 2) in vec3 inInitLocation;
layout (location = 3) in vec3 inInitVelocity;
layout (location = 4) in float inLifeTime;
layout (location = 5) in float inInitlifeTime;
As an alternative, the attribute index can be determined by glGetAttribLocation after linking the program, or set by glBindAttribLocation before linking it.
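For illustration, the query-after-linking alternative might look like this sketch, reusing program_obj and the Particle layout from above:
GLint loc = glGetAttribLocation(program_obj, "inLocation");
if (loc != -1) {
    glEnableVertexAttribArray(loc);
    glVertexAttribPointer(loc, 3, GL_FLOAT, GL_FALSE, sizeof(Particle), 0);
}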

Fragment shader color error with multiple buffers

I've been trying out OpenGL recently and am stuck on an issue again.
If I set colors via uniforms, I can draw multiple vertex arrays with any color of my choice. But passing two buffers to one vertex array object results in weird coloration, where attribute 0 is the vertex position and attribute 1 is the color.
My main function :
int main(){
    Window window(960, 540);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    Reader read1("src/shaders/test.vert");
    Reader read2("src/shaders/test.frag");
    char * r1 = read1.getData();
    char * r2 = read2.getData();
    GLfloat vert[] = {
        0, 0, 0,
        0, 3, 0,
        8, 3, 0,
        8, 0, 0
    };
    GLushort indices[] = {
        0, 1, 2,
        2, 3, 0
    };
    GLfloat colors[] = {
        1, 0, 1, 1,
        1, 0, 1, 1,
        1, 0, 1, 1,
        1, 0, 1, 1,
    };
    VertexArray vao;
    Buffer* vbo = new Buffer(vert, 4 * 4, 3);
    vao.addBuffer(vbo, 0);
    vao.addBuffer(new Buffer(colors, 4 * 4, 4), 1);
    indexBuffer ibo(indices, 6);
    Shader shader(r1, r2);
    shader.enable();
    shader.setUniformMat4("pr_matrix", mat4::orthographic(0.0f, 16.0f, 0.0f, 9.0f, -1.0f, 1.0f));
    shader.setUniformMat4("ml_matrix", mat4::translation(vec3(4, 3, 0)));
    shader.setUniform2f("light_pos", vec2(8.0f, 4.5f));
    shader.setUniform4f("colour", vec4(0.2, 0.3, 0.8, 1));
    while (!window.closed()){
        window.clear();
        double x, y;
        x = window.getX();
        y = window.getY();
        shader.setUniform2f("light_pos", vec2((float)((x)*16.0f / 960.0f), (float)(9 - 9 * (y) / 540.0f)));
        vao.bind();
        ibo.bind();
        shader.setUniform4f("colour", vec4(0.2, 0.3, 0.8, 1));
        shader.setUniformMat4("ml_matrix", mat4::translation(vec3(4, 3, 0)));
        glDrawElements(GL_TRIANGLES, ibo.getCount(), GL_UNSIGNED_SHORT, 0);
        ibo.unbind();
        vao.unbind();
        window.update();
    }
    return 0;
}
My vertex shader:
#version 410 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec4 color;
uniform mat4 pr_matrix ;
uniform mat4 vw_matrix = mat4(1.0f);
uniform mat4 ml_matrix = mat4(1.0f);
out DATA{
    vec4 position;
    vec4 color;
} vs_out;
out vec4 pos;
void main(){
    gl_Position = pr_matrix * vw_matrix * ml_matrix * vec4(position, 1);
    vs_out.position = ml_matrix * vec4(position, 1);
    vs_out.color = color;
}
My fragment shader:
#version 410 core
layout(location = 0) out vec4 color;
uniform vec4 colour;
uniform vec2 light_pos;
in DATA{
    vec4 position;
    vec4 color;
} fs_in;
void main(){
    float intensity = 1.0f / length(fs_in.position.xy - light_pos);
    //color = fs_in.color * intensity;
    color = fs_in.color * intensity;
}
My Buffer class, in case it needs to be corrected:
Buffer::Buffer(GLfloat *data, GLsizei count, GLuint compCountExt) : compCount(compCountExt) {
    glGenBuffers(1, &bufferId);
    glBindBuffer(GL_ARRAY_BUFFER, bufferId);
    glBufferData(GL_ARRAY_BUFFER, count * sizeof(GLfloat), data, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Buffer::bind() const {
    glBindBuffer(GL_ARRAY_BUFFER, bufferId);
}
void Buffer::unbind() const {
    glBindBuffer(GL_ARRAY_BUFFER, 0);
}
EDIT:
Code of the VertexArray class:
VertexArray::VertexArray(){
    glGenVertexArrays(1, &arrayID);
}
void VertexArray::bind() const{
    glBindVertexArray(arrayID);
}
void VertexArray::unbind() const{
    glBindVertexArray(0);
}
VertexArray::~VertexArray(){
}
void VertexArray::addBuffer(Buffer* buffer, GLuint index){
    bind();
    glBindBuffer(GL_ARRAY_BUFFER, arrayID);
    glEnableVertexAttribArray(index);
    glVertexAttribPointer(index, buffer->getComCount(), GL_FLOAT, GL_FALSE, 0, 0);
    buffer->unbind();
    unbind();
}
There are calls to glVertexAttribPointer in this class.
glVertexAttribPointer refers to the currently bound array buffer. This means you have to bind the array buffer before you use glVertexAttribPointer:
void VertexArray::addBuffer(Buffer* buffer, GLuint index){
    bind();
    // glBindBuffer(GL_ARRAY_BUFFER, arrayID); <---- skip
    buffer->bind(); // <---- bind the array buffer
    glEnableVertexAttribArray(index);
    glVertexAttribPointer(index, buffer->getComCount(), GL_FLOAT, GL_FALSE, 0, 0);
    buffer->unbind();
    unbind();
}
See OpenGL 4.6 Specification - 10.3.9 Vertex Arrays in Buffer Objects:
A buffer object binding point is added to the client state associated with each vertex array index. The commands that specify the locations and organizations of vertex arrays copy the buffer object name that is bound to ARRAY_BUFFER to the binding point corresponding to the vertex array index being specified. For example, the VertexAttribPointer command copies the value of ARRAY_BUFFER_BINDING.
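In other words, the pointer call captures whatever buffer happens to be bound at that moment. A minimal sketch (vao and vbo are placeholders):
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);                    // this binding is recorded ...
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // ... by this call
glBindBuffer(GL_ARRAY_BUFFER, 0);                      // unbinding afterwards does no harm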

OpenGL - vertex color in shader gets swapped

I'm trying to send colors to the shader, but the colors get swapped:
I send 0xFF00FFFF (magenta) but get 0xFFFF00FF (yellow) in the shader.
From experimenting, it looks like the byte order is being reversed.
My vertex shader:
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
out DATA
{
    vec4 position;
    vec3 normal;
    vec4 color;
} vs_out;
void main()
{
    gl_Position = pr_matrix * vw_matrix * ml_matrix * position;
    vs_out.position = position;
    vs_out.color = color;
    vs_out.normal = normalize(mat3(ml_matrix) * normal);
}
And the fragment shader:
#version 330 core
layout(location = 0) out vec4 out_color;
in DATA
{
    vec3 position;
    vec3 normal;
    vec4 color;
} fs_in;
void main()
{
    out_color = fs_in.color;
    //out_color = vec4(fs_in.color.y, 0, 0, 1);
    //out_color = vec4((fs_in.normal + 1 / 2.0), 1.0);
}
Here is how I set up the mesh:
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLint color; // GLuint tested
};
std::vector<Vertex_Color> verts = std::vector<Vertex_Color>();
[loops]
int color = 0xFF00FFFF; // magenta, uint tested
verts.push_back({ vert, normal, color });
glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex_Color), &verts[0], GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, normal)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
I can't figure out what's wrong. Thanks in advance.
Your code is reinterpreting an int as 4 consecutive bytes in memory. The internal encoding of int (and all other types) is machine-specific. In your case, you have 32-bit integers stored in little-endian byte order, which is the typical case for PC environments.
You could use an array like GLubyte color[4] to explicitly get a defined memory layout.
If you really want to use an integer type, you could send the data as an integer attribute with glVertexAttribIPointer (note the I there) and use unpackUnorm4x8 in the shader to get a normalized float vector. However, that requires at least GLSL 4.10, and might be less efficient than the standard approach.
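A sketch of the byte-array variant, keeping the field names from the question (the initializer shown in the comment is only an illustration):
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLubyte color[4]; // laid out in memory as R, G, B, A on every platform
};
// magenta, independent of endianness:
// verts.push_back({ vert, normal, { 0xFF, 0x00, 0xFF, 0xFF } });
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color),
                      (const GLvoid*)offsetof(Vertex_Color, color));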

Vertex pos/normal/coord not rendering correctly

I'm having trouble sending both normals and a u,v pair to my shaders. If I remove the normal, things work as expected.
EDIT
It appears that v_normal is receiving the values intended for v_coord. I still have no idea why, though.
/EDIT
This is my vertex struct:
struct Vertex{
    Vertex(vec3 const & v) : pos(v) {}
    vec3 pos;
    vec3 normal;
    real u, v;
};
This is the initialization code:
const int VERTEX_POS_INDX = 0;
const int VERTEX_NORMAL_INDX = 1;
const int VERTEX_TEXCOORD_INDX = 2;
const int VERTEX_POS_SIZE = 3;
const int VERTEX_NORMAL_SIZE = 3;
const int VERTEX_TEXCOORD_SIZE = 2;
GLuint vbo, ibo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sphere->vertices.size()*sizeof(Vertex), &sphere->vertices[0], GL_STATIC_DRAW);
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sphere->indices.size()*sizeof(unsigned short), &sphere->indices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray ( VERTEX_POS_INDX );
glEnableVertexAttribArray ( VERTEX_NORMAL_INDX );
glEnableVertexAttribArray ( VERTEX_TEXCOORD_INDX );
int offset = 0;
glVertexAttribPointer ( VERTEX_POS_INDX, VERTEX_POS_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
offset += VERTEX_POS_SIZE * sizeof(real);
glVertexAttribPointer ( VERTEX_NORMAL_INDX, VERTEX_NORMAL_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
offset += VERTEX_NORMAL_SIZE * sizeof(real);
glVertexAttribPointer ( VERTEX_TEXCOORD_INDX, VERTEX_TEXCOORD_INDX, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
glBindAttribLocation ( programObject, VERTEX_POS_INDX, "a_position" );
glBindAttribLocation ( programObject, VERTEX_NORMAL_INDX, "a_normal" );
glBindAttribLocation ( programObject, VERTEX_TEXCOORD_INDX, "a_coord" );
The vertex shader:
precision highp float;
uniform mat4 u_mv;
uniform mat4 u_mvp;
uniform vec3 u_light;
uniform vec3 u_up;
attribute vec3 a_position;
attribute vec2 a_coord;
attribute vec3 a_normal;
varying vec2 v_coord;
varying vec3 v_normal;
void main() {
    v_coord = a_coord;
    v_normal = a_normal;
    gl_Position = u_mvp * vec4(a_position, 1);
}
The fragment shader:
precision highp float;
uniform vec3 u_up;
varying vec3 v_normal;
varying vec2 v_coord;
void main()
{
    vec2 coord = v_coord;
    vec3 normal = v_normal;
    coord.x = mod(v_coord.x * 5.0, 1.0);
    coord.y = mod(v_coord.y * 5.0, 1.0);
    gl_FragColor = vec4(
        mod(coord.x * 1.0, 1.0),
        mod(coord.y * 1.0, 1.0),
        mod(normal.z * 5.0, 1.0) * 0.0,
        1.0);
}
I just had to use glGetAttribLocation to determine the indices instead of assuming that glBindAttribLocation would do the trick.
EDIT
Using glBindAttribLocation before linking the program did the trick and preserved my intentions with the code.