I need help converting the render function of the IQM model (void renderiqm()) to modern OpenGL (without deprecated functions like glEnableClientState(), etc.).
The code of the renderiqm() function is here:
https://github.com/lsalzman/iqm/blob/master/demo/gpu-demo.cpp
and these are the shaders:
Vertex Shader:
"#version 120\n"
"#ifdef GL_ARB_uniform_buffer_object\n"
" #extension GL_ARB_uniform_buffer_object : enable\n"
" layout(std140) uniform animdata\n"
" {\n"
" uniform mat3x4 bonemats[80];\n"
" };\n"
"#else\n"
" uniform mat3x4 bonemats[80];\n"
"#endif\n"
"attribute vec4 vweights;\n"
"attribute vec4 vbones;\n"
"attribute vec4 vtangent;\n"
"void main(void)\n"
"{\n"
" mat3x4 m = bonemats[int(vbones.x)] * vweights.x;\n"
" m += bonemats[int(vbones.y)] * vweights.y;\n"
" m += bonemats[int(vbones.z)] * vweights.z;\n"
" m += bonemats[int(vbones.w)] * vweights.w;\n"
" vec4 mpos = vec4(gl_Vertex * m, gl_Vertex.w);\n"
" gl_Position = gl_ModelViewProjectionMatrix * mpos;\n"
" gl_TexCoord[0] = gl_MultiTexCoord0;\n"
" mat3 madjtrans = mat3(cross(m[1].xyz, m[2].xyz), cross(m[2].xyz, m[0].xyz), cross(m[0].xyz, m[1].xyz));\n"
" vec3 mnormal = gl_Normal * madjtrans;\n"
" vec3 mtangent = vtangent.xyz * madjtrans; // tangent not used, just here as an example\n"
" vec3 mbitangent = cross(mnormal, mtangent) * vtangent.w; // bitangent not used, just here as an example\n"
" gl_FrontColor = gl_Color * (clamp(dot(normalize(gl_NormalMatrix * mnormal), gl_LightSource[0].position.xyz), 0.0, 1.0) * gl_LightSource[0].diffuse + gl_LightSource[0].ambient);\n"
"}\n",
Fragment Shader:
"uniform sampler2D tex;\n"
"void main(void)\n"
"{\n"
" gl_FragColor = gl_Color * texture2D(tex, gl_TexCoord[0].xy);\n"
"}\n",
I tried converting this to modern OpenGL (VBOs, VAOs, shader version 330, etc.), but I don't know where the failure is. Can you show me the correct way to convert it to modern OpenGL?
PS: I'm using OpenGL version 3.3.
Important additional information:
OK, here is my modified renderiqm() function:
void renderiqm(GLSLProgram& mainShader)
{
    m_matrix->loadIdentity();
    matrixTransformations();
    mainShader.sendUniform("modelYAxisDiff", getYAxisDiff());
    mainShader.sendUniform4x4("modelMatrix", m_matrix->getModelMatrix());
    // send the normal matrix
    // mainShader.sendUniform4x4("normalMatrix", m_matrix->getNormalMatrix());
    mainShader.sendUniform3x3("normalMatrix", glm::value_ptr(m_matrix->getNormalMatrix()));
    // send the transformation (modelview) matrix
    mainShader.sendUniform4x4("modelViewMatrix", glm::value_ptr(m_matrix->getModelViewMatrix()));
    mainShader.sendUniform("objColor", m_color);
    m_matrix->pushMatrix(MODEL_MATRIX);

    glBindBuffer(GL_UNIFORM_BUFFER, ubo);
    glBufferData(GL_UNIFORM_BUFFER, ubosize, NULL, GL_STREAM_DRAW);
    glBufferSubData(GL_UNIFORM_BUFFER, bonematsoffset, numjoints*sizeof(Matrix3x4), outframe[0].a.v);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
    glBindBufferBase(GL_UNIFORM_BUFFER, 0, ubo);

    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);

    vertex *vert = NULL;
    GLsizei stride = sizeof(vert->position) + sizeof(vert->normal) + sizeof(vert->tangent) + sizeof(vert->texcoord);
    if(numframes > 0)
    {
        stride += sizeof(vert->blendindex) + sizeof(vert->blendweight);
    }

    // positions
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0);
    // normals
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, (void*)(sizeof(vert->position)));
    // tangents
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 4, GL_FLOAT, GL_FALSE, stride, (void*)(sizeof(vert->position) + sizeof(vert->normal)));
    // texture coordinates
    glEnableVertexAttribArray(3);
    glVertexAttribPointer(3, 2, GL_FLOAT, GL_FALSE, stride, (void*)(sizeof(vert->position) + sizeof(vert->normal) + sizeof(vert->tangent)));
    if(numframes > 0)
    {
        // blend weights
        glEnableVertexAttribArray(4);
        glVertexAttribPointer(4, 4, GL_UNSIGNED_BYTE, GL_TRUE, stride, (void*)(sizeof(vert->position) + sizeof(vert->normal) + sizeof(vert->tangent) + sizeof(vert->texcoord)));
        // blend indices
        glEnableVertexAttribArray(5);
        glVertexAttribPointer(5, 4, GL_UNSIGNED_BYTE, GL_FALSE, stride, (void*)(sizeof(vert->position) + sizeof(vert->normal) + sizeof(vert->tangent) + sizeof(vert->texcoord) + sizeof(vert->blendindex)));
    }

    iqmtriangle *tris = NULL;
    // DEBUG
    for(size_t i = 0; i < m_textureIndices.size(); i++)
    {
        iqmmesh &m = meshes[i];
        glBindTexture(GL_TEXTURE_2D, textures[i] ? textures[i] : notexture);
        tTextures[i].bindTexture(0);
        glDrawElements(GL_TRIANGLES, 3*m.num_triangles, GL_UNSIGNED_INT, &tris[m.first_triangle]);
    }

    glDisableVertexAttribArray(0);
    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(2);
    glDisableVertexAttribArray(3);
    if(numframes > 0)
    {
        glDisableVertexAttribArray(4);
        glDisableVertexAttribArray(5);
    }
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    //glPopMatrix();
    m_matrix->popMatrix(MODEL_MATRIX);
}
Clarification:
mainShader is a tested shader interface that works fine (no errors there), and m_matrix is a GLM-based matrix interface that also works fine.
The vertex shader is this one:
string vAnimationShader = "\n\
#version 330 \n\
\n\
uniform mat4 projectionMatrix; \n\
uniform mat4 modelViewMatrix; \n\
uniform mat4 normalMatrix; \n\
uniform mat4 modelMatrix; \n\
uniform vec3 objColor; \n\
\n\
uniform mat4 DepthBiasMVP; \n\
smooth out vec4 ShadowCoord; \n\
\n\
smooth out vec2 texCoord; \n\
smooth out vec3 vNormal; \n\
smooth out vec3 vEyeSpacePos; \n\
smooth out vec3 vWorldPos; \n\
smooth out vec3 vObjColor; \n\
\n\
layout (location = 0) in vec3 inPosition; \n\
layout (location = 1) in vec3 inNormal; \n\
layout (location = 2) in vec4 vtangent; \n\
layout (location = 3) in vec2 inCoord; \n\
layout (location = 4) in vec4 vweights; \n\
layout (location = 5) in vec4 vbones; \n\
\n\
uniform mat3x4 bonemats[80]; \n\
\n\
void main(void) \n\
{ \n\
    vObjColor = objColor; \n\
    mat3x4 m = bonemats[int(vbones.x)] * vweights.x; \n\
    m += bonemats[int(vbones.y)] * vweights.y; \n\
    m += bonemats[int(vbones.z)] * vweights.z; \n\
    m += bonemats[int(vbones.w)] * vweights.w; \n\
    vec4 vEyeSpacePosVertex = modelViewMatrix*vec4(inPosition, 1.0); \n\
    gl_Position = projectionMatrix*vEyeSpacePosVertex; \n\
    mat3 madjtrans = mat3(cross(m[1].xyz, m[2].xyz), cross(m[2].xyz, m[0].xyz), cross(m[0].xyz, m[1].xyz)); \n\
    vec3 mnormal = inNormal * madjtrans; \n\
    vec3 mtangent = vtangent.xyz * madjtrans; // tangent not used, just here as an example \n\
    vec3 mbitangent = cross(mnormal, mtangent) * vtangent.w; // bitangent not used, just here as an example \n\
    texCoord = inCoord; \n\
    vec4 vRes = normalMatrix*vec4(inNormal, 0.0); \n\
    vNormal = vRes.xyz; \n\
    vEyeSpacePos = vEyeSpacePosVertex.xyz; \n\
    vec4 vWorldPosVertex = modelMatrix*vec4(inPosition, 1.0); \n\
    vWorldPos = vWorldPosVertex.xyz; \n\
    ShadowCoord = DepthBiasMVP * vWorldPosVertex; \n\
}\n\
";
I can't find the error... on screen I don't see the IQM model. Everything else draws perfectly, but the IQM model doesn't appear.
IMPORTANT DISCOVERY:
I recently discovered that the problem occurs when the OpenGL context is 3.2+. If I switch to a 3.1 OpenGL context, it works perfectly with my modified function.
I haven't found what the problem is... I suspect a deprecated function, but I can't find it. Please, can you help me? Thanks a lot.
The problem was initializing the VBO vertex attributes without a VAO. An OpenGL 3.2+ core profile context has no default vertex array object, so setting up vertex attributes (and drawing) fails unless a VAO is bound, while a 3.1/compatibility context still accepts the default VAO 0. I created and bound a VAO, and now the model renders correctly.
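For anyone hitting the same issue, here is a minimal sketch of the fix under the same assumptions as the code above (vao is a new GLuint handle; setupVertexAttribs() is a hypothetical helper standing in for the glEnableVertexAttribArray/glVertexAttribPointer calls from renderiqm()):

// One-time setup, after vbo and ebo have been filled:
GLuint vao = 0;
glGenVertexArrays(1, &vao);                 // a 3.2+ core context requires a non-zero VAO
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo); // the element buffer binding is stored in the VAO
setupVertexAttribs();                       // hypothetical helper: the attribute setup from renderiqm()
glBindVertexArray(0);

// Per frame, instead of re-specifying the attribute pointers:
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, 3*m.num_triangles, GL_UNSIGNED_INT, &tris[m.first_triangle]);
glBindVertexArray(0);

Baking the attribute setup into the VAO once also means the per-draw code no longer has to touch glVertexAttribPointer at all.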
Related
I am trying to get a basic geometry shader to work, but I am completely failing. After checking numerous resources, I still cannot find a solution to my problem.
Here is my code for my vertex, geometry, and fragment shaders.
Vertex Shader:
#version 330 core
// Vertex Shader Inputs
layout (location = 0) in vec3 Pos;
layout (location = 1) in vec3 Norm;
layout (location = 2) in vec3 Color;
// Vertex to Geometry Shader Outputs
out DATA {
    vec3 vsPos;
    vec3 vsNorm;
    vec4 vsColor;
} data_out;
// Main.cpp Imports
uniform mat4 camMatrix; // viewProjection Matrix
uniform mat4 model;
void main()
{
    vec3 vsPos = vec3(model * vec4(Pos, 1.0f));
    vec3 vsNorm = mat3(transpose(inverse(model))) * Norm; // Normal vector correction
    vec4 vsColor = vec4(Color, 1.0f);
    gl_Position = camMatrix * vec4(vsPos, 1.0f);
}
Geometry Shader:
#version 330 core
layout (triangles) in;
layout (triangle_strip, max_vertices = 3) out;
out vec3 gsPos;
out vec3 gsNorm;
out vec3 gsColor;
in DATA {
    vec3 vsPos;
    vec3 vsNorm;
    vec4 vsColor;
} data_in[];
uniform mat4 camMatrix;
void main()
{
    for (int i = 0; i < 3; i++)
    {
        gsPos = data_in[i].vsPos;
        gsNorm = data_in[i].vsNorm;
        gsColor = data_in[i].vsColor;
        gl_Position = camMatrix * vec4(data_in[i].vsPos, 1.0f);
        EmitVertex();
    }
    EndPrimitive();
}
Fragment Shader:
#version 330 core
out vec4 FragColor;
// Fragment Shader Inputs
in vec3 gsPos;
in vec3 gsNorm;
in vec4 gsColor;
// Fragment Shader Uniforms
uniform sampler2D diffuse0;
uniform sampler2D specular0;
uniform vec4 lightColor;
uniform vec3 lightPos;
uniform vec3 camPos;
vec4 pointLight()
{
    vec3 lightVec = (lightPos - vsPos);
    // intensity of light with respect to distance
    float dist = length(lightVec);
    float a = 0.7;
    float b = 0.4;
    float c = 1.0;
    float inten = 1.0f / (a * dist * dist + b * dist + c);
    // ambient lighting
    float ambient = 0.75f;
    // diffuse lighting
    vec3 fsNorm = normalize(gsNorm);
    vec3 lightDirection = normalize(lightVec);
    float diffuse = max(dot(fsNorm, lightDirection), 0.0f);
    // specular lighting
    float specular = 0.0f;
    if (diffuse != 0.0f)
    {
        float specularLight = 0.50f;
        vec3 viewDirection = normalize(gsNorm - gsPos);
        vec3 halfwayVec = normalize(viewDirection + lightDirection);
        float specAmount = pow(max(dot(fsNorm, halfwayVec), 0.0f), 32);
        specular = specAmount * specularLight;
    }
    return inten * (gsColor * (diffuse + ambient) + gsColor * specular) * lightColor;
}
void main()
{
    // outputs final color
    FragColor = pointLight();
}
My mesh generation function:
void genMesh()
{
    VAO.Bind();
    VBO VBO(vtx);
    EBO EBO(idx);
    VAO.LinkAttrib(VBO, 0, 3, GL_FLOAT, sizeof(Vertex), (void*)0);
    VAO.LinkAttrib(VBO, 1, 3, GL_FLOAT, sizeof(Vertex), (void*)(3 * sizeof(float)));
    VAO.LinkAttrib(VBO, 2, 4, GL_FLOAT, sizeof(Vertex), (void*)(6 * sizeof(float)));
    VAO.Unbind();
    VBO.Unbind();
    EBO.Unbind();
}
My mesh draw function:
void Mesh::Draw(Shader& shader, Camera& camera)
{
    shader.Activate();
    VAO.Bind();
    // Take care of the camera matrix
    glUniform3f(glGetUniformLocation(shader.ID, "camPos"),
                camera.Position.x,
                camera.Position.y,
                camera.Position.z);
    camera.Matrix(shader, "camMatrix");
    // Draw the actual mesh
    glDrawElements(GL_TRIANGLES, idx.size() * sizeof(GLuint), GL_UNSIGNED_INT, 0);
}
I call my mesh generation function outside of the main while loop, then I draw the mesh in my main while loop.
Debugging my program through RenderDoc gives me the error, "No vertex shader bound at draw!" Without the geometry shader (keeping everything else roughly the same), I do not get any errors in RenderDoc. I tried updating my graphics drivers, but I am just getting the same error. Please help me, I feel like I am losing my mind.
In the fragment shader, gsColor is declared as a vec4, but in the geometry shader it is declared as a vec3. That means the assignment gsColor = data_in[i].vsColor (a vec4) can't compile, and the geometry and fragment stage interfaces don't match, so the program never links successfully and nothing is bound at draw time. That is why RenderDoc reports "No vertex shader bound at draw!". Declare out vec4 gsColor; in the geometry shader so the types match.
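A link-status check would have surfaced this at startup. A minimal sketch, assuming program holds the linked program's ID (e.g. shader.ID from the code above):

GLint linked = GL_FALSE;
glGetProgramiv(program, GL_LINK_STATUS, &linked);
if (linked != GL_TRUE)
{
    GLchar log[1024];
    GLsizei length = 0;
    glGetProgramInfoLog(program, sizeof(log), &length, log);
    fprintf(stderr, "Program link failed: %.*s\n", (int)length, log);
}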
I'm writing a small "engine", and the time has finally come to implement transformations. However, when I try to glGetUniformLocation, it returns -1. Here is my rendering method:
void GFXRenderer::submit(EntityBase* _entity, GPUProgram _program) {
    if(_entity->mesh.has_value()) {
        mat4 mod_mat(1.0);
        //mod_mat = translate(mod_mat, _entity->transform.position);
        /*
        mod_mat = scale(mod_mat, _entity->transform.scale);
        mod_mat = rotate(mod_mat, radians(_entity->transform.rotation.x), vec3(1.0, 0.0, 0.0));
        mod_mat = rotate(mod_mat, radians(_entity->transform.rotation.y), vec3(0.0, 1.0, 0.0));
        mod_mat = rotate(mod_mat, radians(_entity->transform.rotation.z), vec3(0.0, 0.0, 1.0));
        */
        mod_mat = translate(mod_mat, vec3(0.5f, -0.5f, 0.0f));
        //mod_mat = glm::rotate(mod_mat, (float)glfwGetTime(), glm::vec3(0.0f, 0.0f, 1.0f));
        glUseProgram(_program.id);
        int transform = glGetUniformLocation(_program.vsh.id, "transform");
        std::cout << transform << std::endl;
        glUniformMatrix4fv(transform, 1, GL_FALSE, value_ptr(mod_mat));
        glUseProgram(_program.id);
        glBindTexture(GL_TEXTURE_2D, _entity->mesh->tex_id);
        glBindVertexArray(_entity->mesh->vao);
        glDrawElements(GL_TRIANGLES, _entity->mesh->indices.size(), GL_UNSIGNED_INT, 0);
        glUseProgram(0);
        glBindVertexArray(0);
    }
}
Here EntityBase is an object class. It contains a transform class, as follows:
class Transform {
public:
    vec3 position;
    vec3 rotation;
    vec3 scale;
    quat q_rot;
    mat4x4 matrix;
};
Ignore the quaternion and matrix. Also, I must mention that without doing the transformation, it renders flawlessly. (SIKE)
Here is my vsh:
#version 460 core
layout (location = 0) in vec3 aPos;
layout (location = 1) in vec2 aTex;
out vec2 tex_coord;
out mat4 f_tr_opt; // for stopping optimization
uniform mat4 transform;
void main() {
    tex_coord = aTex;
    f_tr_opt = transform;
    gl_Position = transform * vec4(aPos, 1.0);
}
Here is my fsh:
#version 460 core
in vec2 tex_coord;
in mat4 f_tr_opt; // again, same thing
out vec4 FragColor;
uniform sampler2D texture0;
void main() {
    mat4 garbage = f_tr_opt * f_tr_opt; // named garbage for easier recognition
    FragColor = texture(texture0, tex_coord);
}
I check for compile and linking errors, all is fine. Please correct me as to what I am doing wrong here.
See glGetUniformLocation. The uniform location must be requested from the linked program object, not from the (vertex) shader object:
// wrong: asks the vertex shader object for the location
int transform = glGetUniformLocation(_program.vsh.id, "transform");
// correct: asks the linked program object
int transform = glGetUniformLocation(_program.id, "transform");
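A defensive usage sketch built on the corrected call (same _program and mod_mat as in the question; the error message is illustrative):

glUseProgram(_program.id);
int transform = glGetUniformLocation(_program.id, "transform");
if (transform == -1)
{
    // name not found in the linked program, or the uniform was optimized out
    std::cout << "uniform 'transform' not found" << std::endl;
}
glUniformMatrix4fv(transform, 1, GL_FALSE, value_ptr(mod_mat));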
I'm trying to send colors to the shader, but the colors get swapped: I send 0xFF00FFFF (magenta) but I get 0xFFFF00FF (yellow) in the shader. From experimenting, it looks like the byte order is being reversed.
My vertex shader:
#version 330 core
layout(location = 0) in vec4 position;
layout(location = 1) in vec3 normal;
layout(location = 2) in vec4 color;
uniform mat4 pr_matrix;
uniform mat4 vw_matrix = mat4(1.0);
uniform mat4 ml_matrix = mat4(1.0);
out DATA
{
    vec4 position;
    vec3 normal;
    vec4 color;
} vs_out;
void main()
{
    gl_Position = pr_matrix * vw_matrix * ml_matrix * position;
    vs_out.position = position;
    vs_out.color = color;
    vs_out.normal = normalize(mat3(ml_matrix) * normal);
}
And the fragment shader:
#version 330 core
layout(location = 0) out vec4 out_color;
in DATA
{
    vec3 position;
    vec3 normal;
    vec4 color;
} fs_in;
void main()
{
    out_color = fs_in.color;
    //out_color = vec4(fs_in.color.y, 0, 0, 1);
    //out_color = vec4((fs_in.normal + 1 / 2.0), 1.0);
}
Here is how I set up the mesh:
struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLint color; // GLuint tested
};
std::vector<Vertex_Color> verts = std::vector<Vertex_Color>();
[loops]
int color = 0xFF00FFFF; // magenta, uint tested
verts.push_back({ vert, normal, color });
glBufferData(GL_ARRAY_BUFFER, verts.size() * sizeof(Vertex_Color), &verts[0], GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)0);
glEnableVertexAttribArray(0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, normal)));
glEnableVertexAttribArray(1);
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));
glEnableVertexAttribArray(2);
I can't figure out what's wrong. Thanks in advance.
Your code is reinterpreting an int as 4 consecutive bytes in memory. The internal encoding of int (and all other types) is machine-specific. In your case, you have 32-bit integers stored in little-endian byte order, which is the typical case for PC environments.
You could use an array like GLubyte color[4] to get an explicitly defined memory layout.
If you really want to use an integer type, you could send the data as an integer attribute with glVertexAttribIPointer (note the I there) and use unpackUnorm4x8 in the shader to get a normalized float vector. However, that requires at least GLSL 4.10, and it might be less efficient than the standard approach.
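For illustration, a sketch of the byte-array variant, assuming the same Vec3 type and attribute setup as in the question:

struct Vertex_Color {
    Vec3 vertex;
    Vec3 normal;
    GLubyte color[4]; // r, g, b, a: one byte each, in declaration order
};

// Magenta, independent of machine endianness:
verts.push_back({ vert, normal, { 0xFF, 0x00, 0xFF, 0xFF } });

// The attribute setup is unchanged: 4 normalized unsigned bytes.
glVertexAttribPointer(2, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(Vertex_Color), (const GLvoid*)(offsetof(Vertex_Color, color)));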
I'm using OpenGL 3.3 with GLFW.
The problem is that GL_LINE_STRIP and GL_LINE_LOOP give the same result.
Here is the array of 2D coordinates:
GLfloat vertices[] =
{
    0, 0,
    1, 1,
    1, 2,
    2, 2,
    3, 1,
};
The attribute pointer:
// Position attribute 2D
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), (GLvoid*)0);
glEnableVertexAttribArray(0);
And finally:
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices)/4);
Vertex shader:
#version 330 core
layout (location = 0) in vec2 position;
layout (location = 1) in vec3 color;
out vec3 ourColor;
uniform mat4 model;
uniform mat4 view;
uniform mat4 projection;
void main()
{
    gl_Position = projection * view * model * vec4(position, 0.0f, 1.0f);
    ourColor = color;
}
Fragment shader:
#version 330 core
in vec3 ourColor;
out vec3 color;
void main()
{
    color = vec3(ourColor);
}
The color attribute is disabled (the lines are black and visible).
Any idea?
You have only 5 pairs of floats, so 5 vertices. The total size of your array is 10 floats of 4 bytes each, so 40 bytes, and your count expression, sizeof(vertices)/4, therefore gives 10. The 5 extra vertices presumably read as (0, 0), which draws trailing lines back to the origin and makes the strip look like a loop. sizeof(array) / (sizeof(array[0]) * dimensionality) would be the correct expression there.
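Applied to the data above, the corrected draw call would be:

// 2 floats per 2D vertex: 40 bytes / 8 bytes = 5 vertices
glDrawArrays(GL_LINE_STRIP, 0, sizeof(vertices) / (2 * sizeof(GLfloat)));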
I'm having trouble sending both normals and a u,v pair to my shaders. If I remove the normal, things work as expected.
EDIT
It appears that v_normal is receiving the values intended for v_coord. I still have no idea why, though.
/EDIT
This is my vertex struct:
struct Vertex{
    Vertex(vec3 const & v) : pos(v) {}
    vec3 pos;
    vec3 normal;
    real u, v;
};
This is the initialization code:
const int VERTEX_POS_INDX = 0;
const int VERTEX_NORMAL_INDX = 1;
const int VERTEX_TEXCOORD_INDX = 2;
const int VERTEX_POS_SIZE = 3;
const int VERTEX_NORMAL_SIZE = 3;
const int VERTEX_TEXCOORD_SIZE = 2;
GLuint vbo, ibo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sphere->vertices.size()*sizeof(Vertex), &sphere->vertices[0], GL_STATIC_DRAW);
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sphere->indices.size()*sizeof(unsigned short), &sphere->indices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray ( VERTEX_POS_INDX );
glEnableVertexAttribArray ( VERTEX_NORMAL_INDX );
glEnableVertexAttribArray ( VERTEX_TEXCOORD_INDX );
int offset = 0;
glVertexAttribPointer ( VERTEX_POS_INDX, VERTEX_POS_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
offset += VERTEX_POS_SIZE * sizeof(real);
glVertexAttribPointer ( VERTEX_NORMAL_INDX, VERTEX_NORMAL_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
offset += VERTEX_NORMAL_SIZE * sizeof(real);
glVertexAttribPointer ( VERTEX_TEXCOORD_INDX, VERTEX_TEXCOORD_SIZE, GL_FLOAT, GL_FALSE, sizeof(Vertex), (void*)offset );
glBindAttribLocation ( programObject, VERTEX_POS_INDX, "a_position" );
glBindAttribLocation ( programObject, VERTEX_NORMAL_INDX, "a_normal" );
glBindAttribLocation ( programObject, VERTEX_TEXCOORD_INDX, "a_coord" );
The vertex shader:
precision highp float;
uniform mat4 u_mv;
uniform mat4 u_mvp;
uniform vec3 u_light;
uniform vec3 u_up;
attribute vec3 a_position;
attribute vec2 a_coord;
attribute vec3 a_normal;
varying vec2 v_coord;
varying vec3 v_normal;
void main() {
    v_coord = a_coord;
    v_normal = a_normal;
    gl_Position = u_mvp * vec4(a_position, 1);
}
The fragment shader:
precision highp float;
uniform vec3 u_up;
varying vec3 v_normal;
varying vec2 v_coord;
void main()
{
    vec2 coord = v_coord;
    vec3 normal = v_normal;
    coord.x = mod(v_coord.x * 5.0, 1.0);
    coord.y = mod(v_coord.y * 5.0, 1.0);
    gl_FragColor = vec4 (
        mod(coord.x*1.0,1.0),
        mod(coord.y*1.0,1.0),
        mod(normal.z*5.0,1.0)*0.0,
        1.0 );
}
I just had to use glGetAttribLocation to query the indexes instead of assuming that glBindAttribLocation would do the trick.
EDIT
Using glBindAttribLocation before linking the program did the trick and preserved my intention in the code.
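For reference, a minimal sketch of that ordering (standard GL calls, assuming the shader objects are already compiled and attached to programObject):

// glBindAttribLocation only takes effect at link time, so bind before linking:
glBindAttribLocation(programObject, VERTEX_POS_INDX, "a_position");
glBindAttribLocation(programObject, VERTEX_NORMAL_INDX, "a_normal");
glBindAttribLocation(programObject, VERTEX_TEXCOORD_INDX, "a_coord");
glLinkProgram(programObject);

// Alternatively, link first and query the locations the linker assigned:
GLint posLoc = glGetAttribLocation(programObject, "a_position");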