GLSL skinning obstacle, who can jump that? - C++

I'm currently trying to set up GPU skinning (with GLSL), but it's not working the way I would like :) Actually, it's not working at all. My mesh disappears when I try this GLSL code:
layout(location = 0) in vec3 vertexPos;
layout(location = 1) in vec2 vertexUv;
layout(location = 2) in vec3 vertexNor;
layout(location = 5) in ivec4 joints_influences;
layout(location = 6) in vec4 weights_influences;
uniform mat4 ViewProj, View, Proj, Model;
out vec3 vertexPosEye;
out vec3 vertexNorEye;
const int MAX_INFLUENCES = 4;
const int MAX_BONES = 50;
uniform mat4 animation_matrices[MAX_BONES];
uniform mat4 inv_bind_matrices[MAX_BONES];
void main()
{
    vertexPosEye = (View * Model * vec4(vertexPos, 1)).xyz; // position in eye space
    vertexNorEye = (View * Model * vec4(vertexNor, 0)).xyz; // normal in eye space
    vec4 final_v = vec4(0, 0, 0, 0);
    for (int i = 0; i < MAX_INFLUENCES; i++)
    {
        vec4 v = vec4(vertexPos, 1)
               * inv_bind_matrices[joints_influences[i]]
               * animation_matrices[joints_influences[i]]
               * weights_influences[i];
        final_v += v;
    }
    gl_Position = ViewProj * Model * final_v;
}
When I try this instead:
gl_Position = ViewProj * Model * vec4(vertexPos, 1);
my mesh is back :) but with no animation anymore, of course...
Here's my application (C++) code where I set up the VBO attributes:
// Vertex position
glGenBuffers(1, &buffer[0]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[0]);
glBufferData(GL_ARRAY_BUFFER, vertices.pos.size() * sizeof(bVector3), &vertices.pos[0], GL_STATIC_DRAW);
// Ibid for uv, normals, tangents and bitangents.
// Skinning : joints index
glGenBuffers(1, &buffer[5]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[5]);
glBufferData(GL_ARRAY_BUFFER, vertices.joints.size() * sizeof(SkinningJoints), &vertices.joints[0], GL_STATIC_DRAW);
// Skinning : weights
glGenBuffers(1, &buffer[6]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[6]);
glBufferData(GL_ARRAY_BUFFER, vertices.weights.size() * sizeof(SkinningWeights), &vertices.weights[0], GL_STATIC_DRAW);
// Indices
glGenBuffers(1, &buffer[7]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[7]);
glBufferData(GL_ARRAY_BUFFER, vertices.indices.size() * sizeof(bUshort), &vertices.indices[0], GL_STATIC_DRAW);
In the main loop:
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(0));
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
// ... same pattern for uv, normals, tangents and bitangents ...
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribPointer(5, 4, GL_INT, GL_FALSE, 0, BUFFER_OFFSET(0));
glEnableVertexAttribArray(6);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(6));
glVertexAttribPointer(6, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m->GetBuffer(7));
glDrawElements(GL_TRIANGLES, m->vertices.indices.size(), GL_UNSIGNED_SHORT, BUFFER_OFFSET(0));
Here is my RenderingVertices struct (after Barr's recommendations):
struct RenderingVertices
{
    // std::vector<Vec3>
    vVec3 pos, nor, tang, btan;
    vVec2 uv;
    vUshort indices;
    vector<SkinningJoints> joints;
    vector<SkinningWeights> weights;
};
And here is my SkinningJoints struct:
struct SkinningJoints
{
    int j[MAX_BONES_PER_VERT];

    SkinningJoints(Vertex::Weights weights[MAX_BONES_PER_VERT])
    {
        for (bUint i = 0; i < MAX_BONES_PER_VERT; i++)
            j[i] = weights[i].jid;
    }
};
My SkinningWeights struct is almost the same, with an array of float instead of int.
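For reference, a minimal sketch of what that SkinningWeights struct presumably looks like (the name of the weight field, value, is an assumption; only jid appears in the original):
struct SkinningWeights
{
    float w[MAX_BONES_PER_VERT];

    SkinningWeights(Vertex::Weights weights[MAX_BONES_PER_VERT])
    {
        for (bUint i = 0; i < MAX_BONES_PER_VERT; i++)
            w[i] = weights[i].value; // assumption: the weight member is called "value"
    }
};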
Now, when I try to debug the joint indices, weight values and the final vertex as colors, here is what I get:
// Shader
color_debug = joints_influences;
http://www.images-host.fr/view.php?img=00021800pop.jpg
color_debug = weights_influences;
http://www.images-host.fr/view.php?img=00021800pop2.jpg
Another interesting thing: when I try this:
vec4 pop = vec4(vertexPos, 1) * animation_matrices[1] * inv_bind_matrices[1] * 1.0;
gl_Position = ViewProj * Model * pop;
my whole mesh actually rotates, which means my animation_matrices uniform is fine.
Can anyone see what I'm doing wrong here?

I finally got it working. For those who may be interested, here is what I was doing wrong :
When I send the joint indices array to GLSL, instead of doing this:
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribPointer(5, 4, GL_INT, GL_FALSE, 0, BUFFER_OFFSET(0));
I needed to do this:
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribIPointer(5, 4, GL_INT, 0, BUFFER_OFFSET(0));
You have to look closely to spot the difference. Instead of calling glVertexAttribPointer(), I needed to call glVertexAttribIPointer() (note the extra I), because the joint indices are integers and must not be converted to floats.
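To summarize the working attribute setup, here is a minimal sketch pairing the integer and float attributes with their GLSL declarations (buffer indices and the BUFFER_OFFSET macro are carried over from the post):
// Joint indices: integer attribute, matches "layout(location = 5) in ivec4 joints_influences;"
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribIPointer(5, 4, GL_INT, 0, BUFFER_OFFSET(0)); // I-variant: values stay integers

// Skinning weights: float attribute, matches "layout(location = 6) in vec4 weights_influences;"
glEnableVertexAttribArray(6);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(6));
glVertexAttribPointer(6, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));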
Hope this will help someone someday.

Did you try debugging your skinning attributes? Output the vertex weights as colors so you can confirm you have meaningful values; if everything is black, you'll know where to look.
From a quick glance at your RenderingVertices I can spot a first problem: you are passing a vector of pointers to GL, which I don't think is what you want to do.
Most of the time you will limit skinning influences to 4 joint/weight pairs per vertex, so you can get away with a simple fixed-size array (i.e. SkinningJoints joints[4];).
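As an illustration of that fixed-size layout, a minimal sketch (the names here are assumptions, not the original code):
// One record per vertex: up to 4 influences, padded with weight 0.
struct VertexSkin
{
    int   joints[4];  // joint indices for this vertex
    float weights[4]; // matching weights, expected to sum to roughly 1.0
};
// One VertexSkin per vertex, uploaded as two tightly packed attribute streams
// (ivec4 joint indices via glVertexAttribIPointer, vec4 weights via glVertexAttribPointer).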

Related

How to read vertex texcoords with OpenMesh

I use OpenGL + OpenMesh to render a 3D mesh.
My mesh reading and processing code:
void read(MyMesh &model, const string &path) {
    model.request_vertex_colors();
    model.request_face_normals();
    OpenMesh::IO::Options opt;
    opt += OpenMesh::IO::Options::VertexColor;
    opt += OpenMesh::IO::Options::FaceNormals;
    if (!OpenMesh::IO::read_mesh(model, path, opt)) {
        cerr << "read error\n";
        exit(1);
    }
    // fill the vertices and normals array
    for (auto f : model.faces())
        for (auto v : model.fv_range(f)) { // how can I get vertex texcoords based on face?
            vertices.push_back(model.point(v));
            normals.push_back(model.normal(f));
            colors.push_back(model.color(v));
        }
    model.release_vertex_colors();
    model.release_face_normals();
}
But I find that I can't get vertex texcoords based on the face in OpenMesh.
There doesn't seem to be a function like model.texcoords(face_handle, vertex_handle) that would let me get the texcoords of a vertex on a given face.
Alternatively, can I get the texcoord indices of each face, like these in an OBJ file:
f 481/1/1 480/2/1 24/3/1 25/4/1
f 477/5/2 7/6/2 17/7/2 18/8/2
f 1/9/3 326/10/3 11/11/3
f 482/12/4 481/1/4 25/4/4
f 478/13/5 477/5/5 18/8/5 19/14/5
f 2/15/6 1/9/6 11/11/6 12/16/6
f 479/17/7 478/13/7 19/14/7 20/18/7
f 3/19/8 2/15/8 12/16/8 13/20/8
f 8/21/9 479/17/9 20/18/9 21/22/9
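As an aside, OpenMesh stores per-face-corner texcoords on halfedges rather than on faces; a sketch of reading them that way (assuming the file actually provides texcoords and that a texcoords vector exists to be filled; this is not from the original post):
model.request_halfedge_texcoords2D();
OpenMesh::IO::Options opt;
opt += OpenMesh::IO::Options::FaceTexCoord; // per-face-corner (halfedge) texcoords
if (!OpenMesh::IO::read_mesh(model, path, opt)) { /* handle error */ }

for (auto f : model.faces())
    for (auto heh : model.fh_range(f)) {       // halfedges of this face = its corners
        auto v = model.to_vertex_handle(heh);  // the corner's vertex
        vertices.push_back(model.point(v));
        texcoords.push_back(model.texcoord2D(heh)); // texcoord of this face corner
    }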
I use glDrawArrays(...) to draw the mesh.
Declarations
vector<MyMesh::Point> vertices; // `MyMesh::Point` is like float3 struct which has 3 float numbers in it
vector<MyMesh::Point> normals;
vector<MyMesh::Point> colors;
unsigned int model_vbo[3], model_vao;
Data bind
glGenBuffers(3, model_vbo);
glGenVertexArrays(1, &model_vao);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[0]);
glBufferData(GL_ARRAY_BUFFER, vertices.size() * 3 * sizeof(float), vertices.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[1]);
glBufferData(GL_ARRAY_BUFFER, colors.size() * sizeof(float), colors.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[2]);
glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(float), normals.data(), GL_DYNAMIC_DRAW);
glBindVertexArray(model_vao);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[0]);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), nullptr);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[1]);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), nullptr);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[2]);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), nullptr);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, 0);
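(A side note, separate from the texcoord question: since colors and normals are also MyMesh::Point, i.e. three floats each, the corresponding glBufferData sizes would presumably need the same factor of 3 as the vertex buffer; a sketch:)
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[1]);
glBufferData(GL_ARRAY_BUFFER, colors.size() * 3 * sizeof(float), colors.data(), GL_DYNAMIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, model_vbo[2]);
glBufferData(GL_ARRAY_BUFFER, normals.size() * 3 * sizeof(float), normals.data(), GL_DYNAMIC_DRAW);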
Draw loop
s.use();
s.setMat4("model", Rotate * Scale);
s.setMat4("projection", cam.projection());
s.setMat4("view", cam.view());
glBindVertexArray(model_vao);
glDrawArrays(GL_TRIANGLES, 0, vertices.size() / 3);
vertex shader
#version 450 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec3 normal;
uniform mat4 model;
uniform mat4 projection;
uniform mat4 view;
out vec3 Color;
out vec3 Normal;
void main(){
Normal = normal;
Color = color;
gl_Position = projection * view * model * vec4(position, 1.0);
}
fragment shader
#version 450 core
out vec4 scene_color;
in vec3 Color; // vertex color
in vec3 Normal;
void main(){
scene_color = vec4(Color,1.0);
}

OpenGL shader for drawing feature points in SLAM [closed]

I want to draw points using an OpenGL shader.
My current code uses glVertex3f(pos.x, pos.y, pos.z), but it becomes slow when there are too many points to draw, so I want to use a shader and glDrawArrays instead. But it doesn't work; please check my code.
original code:
for (const auto lm : landmarks) {
const openvslam::Vec3_t pos_w = lm->get_pos_in_world();
glColor3ub(lm->color_[0], lm->color_[1], lm->color_[2]);
glVertex3f(pos_w.cast<float>().eval().x(),pos_w.cast<float>().eval().y(), pos_w.cast<float>().eval().z());
}
my code:
for (const auto lm : landmarks) {
const openvslam::Vec3_t pos_w = lm->get_pos_in_world();
int buffer_size = local_landmarks.size();
glGenBuffers(2, buffers_);
glBindBuffer(GL_ARRAY_BUFFER, buffers_[0]);
glm::vec3 pos_pt = glm::vec3(pos_w.cast<float>().eval().x(),pos_w.cast<float>().eval().y(), pos_w.cast<float>().eval().z());
glBufferData(GL_ARRAY_BUFFER, 3*buffer_size*sizeof(float), &pos_pt , GL_DYNAMIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, buffers_[1]);
glm::vec3 color_pt = glm::vec3(lm->color_[0], lm->color_[1], lm->color_[2]);
glBufferData(GL_ARRAY_BUFFER, buffer_size*3*sizeof(float), &color_pt, GL_DYNAMIC_DRAW);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(1);
Eigen::Matrix4f mvp = s_cam_shader_opengl->GetProjectionModelViewMatrix();
//Eigen::Matrix4f mvp = s_cam_shader_opengl->GetProjectionMatrix() * s_cam_shader_opengl->GetModelViewMatrix();
glUniformMatrix4fv(mvp_location, 1, GL_FALSE, mvp.data());
glPointSize(1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glDrawArrays(GL_POINTS, 0, 3*num);
}
vertex shader
#version 460
uniform mat4 mvpMat;
layout (location = 0) in vec3 test_position;
layout (location = 1) in vec3 test_color;
out vec3 colorr;
void main(void){
colorr = test_color;
gl_Position = vec4(test_position,1.0);
}
fragment shader
#version 460
uniform mat4 mvpMat;
in vec3 colorr;
out vec4 frag_color;
void main(void) {
frag_color = vec4(colorr, 1.0);
}
////////////////////////////////////////////////////////////////////////
Edit:
I updated the code, but now it gives a segmentation fault. What's the problem?
struct TLandmarkData
{
glm::vec3 pos;
glm::vec3 color;
};
using TLandmarks = std::vector<TLandmarkData>;
TLandmarks landmarks_;
...
code
...
glUseProgram(points_program_);
while(){
...
for (const auto lm : landmarks) {
TLandmarkData aaa;
glm::vec3 pos_pt = glm::vec3(pos_w.cast<float>().eval().x(),pos_w.cast<float>().eval().y(), pos_w.cast<float>().eval().z());
glm::vec3 color_pt = glm::vec3(lm->color_[0], lm->color_[1], lm->color_[2]);
aaa.pos = pos_pt;
aaa.color = color_pt;
landmarks_.push_back(aaa);
}
...
GLuint vbo_;
GLuint vao_;
glGenBuffers(1, &vbo_);
glBindBuffer(GL_ARRAY_BUFFER, vbo_);
glBufferData(GL_ARRAY_BUFFER, landmarks_.size()*sizeof(*landmarks_.data()), landmarks_.data(), GL_STATIC_DRAW);
glGenVertexArrays(1, &vao_);
glBindVertexArray(vao_);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(TLandmarkData), 0);
glEnableVertexAttribArray( 0 );
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(TLandmarkData), (void*)(sizeof(glm::vec3)));
glEnableVertexAttribArray(1);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glDrawArrays(GL_POINTS, 0, landmarks_.size());
}
vertex shader
#version 460
layout (location = 0) in vec3 test_position;
layout (location = 1) in vec3 test_color;
out vec3 colorr;
void main(void){
colorr = test_color;
gl_Position = vec4(test_position,1.0);
}
fragment shader
#version 460
in vec3 colorr;
out vec4 frag_color;
void main(void) {
frag_color = vec4(colorr, 1.0);
}
What you actually do is create landmarks.size() buffers rather than one buffer. You have to create one single buffer. For the best performance, create the buffer once (or only when its contents change) and do the world transformation in the shader.
Use the following data structure to represent a point (or a similar aggregate):
struct TLandmarkData
{
glm::vec3 pos;
glm::vec3 color;
};
using TLandmarks = std::vector<TLandmarkData>;
Create a Vertex Array Object and a Vertex Buffer Object (once at initialization):
(See also Vertex Specification)
TLandmarks landmarks;
GLuint vbo_;
GLuint vao_;
glGenBuffers(1, &vbo_);
glBindBuffer(GL_ARRAY_BUFFER, vbo_);
glBufferData(GL_ARRAY_BUFFER, landmarks.size()*sizeof(*landmarks.data()), landmarks.data(), GL_STATIC_DRAW);
glGenVertexArrays(1, &vao_);
glBindVertexArray(vao_);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(TLandmarkData), 0);
glEnableVertexAttribArray( 0 );
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(TLandmarkData), (void*)(sizeof(glm::vec3)));
glEnableVertexAttribArray(1);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
As you can see, you don't need any loop. If the data changes, the buffer (vbo_) can be updated (e.g. glBufferSubData).
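For instance, a per-frame update could look roughly like this (a sketch, assuming the landmark count stays the same):
glBindBuffer(GL_ARRAY_BUFFER, vbo_);
glBufferSubData(GL_ARRAY_BUFFER, 0,
                landmarks.size() * sizeof(*landmarks.data()),
                landmarks.data());
glBindBuffer(GL_ARRAY_BUFFER, 0);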
When you want to draw the points, it is sufficient to bind the vertex array object. The count argument of glDrawArrays has to be the number of vertices:
glBindVertexArray(vao_);
glDrawArrays(GL_POINTS, 0, landmarks.size());
Use a uniform of type mat4 to transform the points to world coordinates in the vertex shader:
#version 460
uniform mat4 mvpMat;
layout (location = 0) in vec3 test_position;
layout (location = 1) in vec3 test_color;
layout (location=0) uniform mat4 worldtransform;
out vec3 colorr;
void main(void){
colorr = test_color;
gl_Position = worldtransform * vec4(test_position,1.0);
}
Set the uniform (update it per frame) by glUniformMatrix4fv after the program is installed by glUseProgram:
glm::mat4 toworld(1.0f);
// set toworld
// [...]
glUseProgram(myProgram);
glUniformMatrix4fv(0, 1, GL_FALSE, glm::value_ptr(toworld));
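If the shader did not use an explicit uniform location, the location would instead be queried by name (a small sketch, reusing the myProgram name from above):
glUseProgram(myProgram);
GLint loc = glGetUniformLocation(myProgram, "worldtransform");
glUniformMatrix4fv(loc, 1, GL_FALSE, glm::value_ptr(toworld));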

Discarding random fragments on fragment shader without using uniform variables

I have a very large set of points (~300k) derived from an RGB + depth image, and for the sake of obtaining geometric information I have calculated the normal vector for each of those points. However, I can't proceed to any further calculations until I'm certain the normal vectors are correct, so I decided to display each vector as a line:
glm::vec3 lvec = point + normal;
normals[index + 0] = point.x;
normals[index + 1] = point.y;
normals[index + 2] = point.z;
normals[index + 3] = lvec.x;
normals[index + 4] = lvec.y;
normals[index + 5] = lvec.z;
Here, point is a vector containing each point's coordinates and normal is its normal vector. I store both endpoints of each line contiguously in an array and, after buffering, I use GL_LINES to draw the data.
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, height * width * 6 * sizeof(GLfloat),
normals, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
draw function:
glDisable(GL_PROGRAM_POINT_SIZE);
glLineWidth(3.0);
//binding shader program and setting uniform variables
glUseProgram(shader);
glUniformMatrix4fv(modelLocation, 1, GL_FALSE, &model[0][0]);
glUniformMatrix4fv(viewLocation, 1, GL_FALSE, &view[0][0]);
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, &projection[0][0]);
//binding vertex array object and drawing
glBindVertexArray(VAO);
glDrawArrays(GL_LINES, 0, width * height * 2);
vertex shader:
#version 330 core
layout(location = 0) in vec3 position;
uniform mat4 model, view, projection;
void main() {
gl_Position = projection * view * model * vec4(position, 1.0);
}
fragment shader:
#version 330 core
out vec4 colour;
void main() {
colour = vec4(1.0, 0.0, 0.0, 1.0);
}
Naturally, there are 300k lines being drawn and the result is too dense for me to understand whether they are correct or not. Is there a way I could randomly discard some of them? There is a single draw call that does the drawing so I can't think of any way to use uniform variables to select which ones get drawn.
Thanks in advance.
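One possible approach, sketched here purely as an illustration (it is not part of the original post): since both endpoints of a line share the value gl_VertexID / 2, the vertex shader can hash that line index and push most lines outside the clip volume, without any extra uniforms or attributes:
#version 330 core
layout(location = 0) in vec3 position;
uniform mat4 model, view, projection;

// cheap hash giving a pseudo-random value in [0, 1) per line
float hash(float n) { return fract(sin(n) * 43758.5453); }

void main() {
    int lineId = gl_VertexID / 2;   // both endpoints of a line map to the same id
    float r = hash(float(lineId));
    vec4 p = projection * view * model * vec4(position, 1.0);
    // keep roughly 10% of the lines; the rest are moved outside clip space and culled
    gl_Position = (r < 0.1) ? p : vec4(2.0, 2.0, 2.0, 1.0);
}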

OpenGL 3+: draw lines with different colors

I'm using the SIFT algorithm and I want to draw lines between keypoints in different images. I made it work, but all my lines currently have the same color, so the result is unreadable.
What I want to achieve is to assign a random color to each line, but one and only one color per line.
I have to use shaders to do that, so I send LINES and POINTS (that is, the colors) to the same shader, and I don't know what's wrong in my code (it crashes when I try to execute it. EDIT: it is not just that something is wrong in my code (well, obviously yes...), but the error causes a crash, like a segmentation fault. So I think my error is due to a wrong allocation for my color array, because it worked without this array).
my code:
std::vector<GLfloat> points;
std::vector<glm::vec3> colors;
GLuint VAO, VBO[2];
void setupLines() {
glGenVertexArrays(1, &VAO);
glGenBuffers(2, &VBO[0]);
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
glBufferData(GL_ARRAY_BUFFER, points.size() * sizeof(GLfloat), &points[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
glBufferData(GL_ARRAY_BUFFER, colors.size() * sizeof(glm::vec3), &colors[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER,0);
glBindVertexArray(0);
glBindTexture(GL_TEXTURE_2D, 0);
}
void draw() {
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
glDrawArrays(GL_LINES, 0, points.size());
//The error occurs here, it seems...
glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
glDrawArrays(GL_POINTS, 0, colors.size());
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
//for each points, we create the same colors 2by 2 so lines (that are 2 points) have the same colors
void addColor() {
for (int i = 0; i < points.size()/2; ++i) {
float a = rand() / (float)RAND_MAX;
float b = rand() / (float)RAND_MAX;
float c = rand() / (float)RAND_MAX;
colors.push_back(glm::vec3(a, b, c));
colors.push_back(glm::vec3(a, b, c));
}
}
and my vertex shader:
#version 330 core
layout (location = 0) in vec2 aTexCoord;
layout (location = 1) in vec3 color;
out vec2 TexCoord;
out vec3 Col;
void main()
{
TexCoord = vec2(aTexCoord.xy);
Col = color;
}
and then I use Col in the fragment shader for the color.
Is this how I'm supposed to do it?
You have to set the current position gl_Position in the vertex shader.
The vertex coordinate has to be an attribute:
in vec3 aVertCoord;
and you have to assign the coordinate to gl_Position:
gl_Position = vec4(aVertCoord.xyz, 1.0);
Note, for 2D coordinates it should be something like:
in vec2 aVertCoord;
void main()
{
.....
gl_Position = vec4(aVertCoord.xy, 0.0, 1.0);
}
In the end you have 2 vertex attributes: the vertex coordinates and the color. You do not need any texture coordinates, because you draw lines (GL_LINES). But I guess what you call aTexCoord is actually the vertex position, so you have to do it like this:
#version 330 core
layout (location = 0) in vec2 aTexCoord;
layout (location = 1) in vec3 color;
out vec2 TexCoord;
out vec3 Col;
void main()
{
gl_Position = vec4(aTexCoord.xy, 0.0, 1.0);
TexCoord = aTexCoord.xy;
Col = color;
}
The vertex array object VAO stores the state of the generic vertex attributes (glVertexAttribPointer, glEnableVertexAttribArray). The vertex attribute state may refer to a vertex buffer. You only have to bind the vertex array object VAO when you draw the object (the lines):
void draw() {
glBindVertexArray(VAO);
int numberOfPoints = points.size() / 2; // Number of points, not the number of floats
glDrawArrays(GL_LINES, 0, numberOfPoints );
glBindVertexArray(0);
}
Note, it is sufficient to call glDrawArrays once.
Further, the 1st parameter of glVertexAttribPointer is the attribute index:
glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
glBufferData(GL_ARRAY_BUFFER, colors.size() * sizeof(glm::vec3), &colors[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(
1, // <---------------------------- attribute index
3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
Since you need one color per vertex coordinate, but you want every line to be drawn in a single color, you have to do it like this:
void addColor()
{
int numberOfPoints = points.size() / 2;
for (int i = 0; i < numberOfPoints/2; ++i)
{
glm::vec3 col(
rand() / (float)RAND_MAX,
rand() / (float)RAND_MAX,
rand() / (float)RAND_MAX);
colors.push_back(col);
colors.push_back(col);
}
}
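Putting these pieces together, the corrected setup and draw could look roughly like this (a sketch based on the fixes above, not the poster's final code):
void setupLines() {
    glGenVertexArrays(1, &VAO);
    glGenBuffers(2, &VBO[0]);
    glBindVertexArray(VAO);

    glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
    glBufferData(GL_ARRAY_BUFFER, points.size() * sizeof(GLfloat), &points[0], GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0); // location 0: 2D position

    glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
    glBufferData(GL_ARRAY_BUFFER, colors.size() * sizeof(glm::vec3), &colors[0], GL_STATIC_DRAW);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0); // location 1: color

    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
}

void draw() {
    glBindVertexArray(VAO);
    glDrawArrays(GL_LINES, 0, points.size() / 2); // one vertex per 2 floats
    glBindVertexArray(0);
}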

Lighting is misbehaving in shader program

Here is my shader program:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec3 vertexNormal_modelspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform mat3 blNormalMatrix;
uniform vec3 lightPos;
out vec4 forFragColor;
const vec3 diffuseColor = vec3(0.55, 0.09, 0.09);
void main(){
    // Output position of the vertex, in clip space: MVP * position
    gl_Position = MVP * vec4(vertexPosition_modelspace, 1);
    vec3 MaterialAmbientColor = vec3(0.1, 0.1, 0.1) * diffuseColor;
    // All following geometric computations are performed in the
    // camera coordinate system (aka eye coordinates).
    vec3 vertexNormal_cameraspace = (V * M * vec4(vertexNormal_modelspace, 0)).xyz;
    vec4 vertexPosition_cameraspace4 = V * M * vec4(vertexPosition_modelspace, 1);
    vec3 vertexPosition_cameraspace = vec3(vertexPosition_cameraspace4).xyz;
    vec3 lightDir = normalize(lightPos - vertexPosition_cameraspace);
    float lambertian = clamp(dot(lightDir, vertexNormal_cameraspace), 0.0, 1.0);
    forFragColor = vec4(lambertian * diffuseColor, 1.0);
}
My problem is that this "worked" in the older OpenGL profile (the shader didn't even have a version number; I think it was around OpenGL 2.1 or so). The key difference is that I originally had normal = gl_NormalMatrix * gl_Normal and things worked.
However, that was based on my professor's code, which I've updated to the 3.3+ core profile, and after (maybe) fixing the deprecated functions I am now left with this:
https://drive.google.com/file/d/0B6oLZ_d7S-U7cVpkUXpVXzdaZEk/edit?usp=sharing is a link to the video of my program's behavior.
The light source should be a point light at (0, 0, 3) or so that shouldn't move, but it doesn't follow any particularly logical behavioral pattern and I can't make sense of it.
I tried passing the inverse transpose of the model matrix and using it as a replacement normal matrix, but it wrecked my normals. So I don't know.
This was my normalMatrix:
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelMatrix)));
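Presumably that matrix would then be applied to the normal in the vertex shader, along the lines of (a sketch of the usual pattern, not the poster's confirmed fix):
vec3 vertexNormal_cameraspace = normalize(blNormalMatrix * vertexNormal_modelspace);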
Edit: Here is my Display code:
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
// Use our shader
glUseProgram(programID);
// Get our transformations iff we move the camera around.
glm::mat4 MyModelMatrix = ModelMatrix * thisTran * ThisRot;
MVP = ProjectionMatrix * ViewMatrix * MyModelMatrix;
glm::mat4 ModelView = ViewMatrix * MyModelMatrix;
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelView)));
glm::vec3 newLightPos = lightPos;
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &MyModelMatrix[0][0]);
glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);
glUniformMatrix4fv(BlNormalMatrix,1,GL_FALSE, &MyNormalMatrix[0][0]);
glUniformMatrix4fv(BlRotations, 1, GL_FALSE, &ThisRot[0][0]);
glUniform3f(BlCamera, cameraLoc.x, cameraLoc.y, cameraLoc.z);
glUniform3f(lPosition, newLightPos.x,newLightPos.y,newLightPos.z);
// VBO buffer: vertices
// 1st attribute buffer: vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2nd attribute buffer: normals
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glVertexAttribPointer(
1, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// draw object using OpenGL 3.3
glDrawArrays(GL_TRIANGLES, 0, vertices.size() );
The problem ultimately turned out to be an issue with the model loader provided by my professor, which was somehow incompatible with modern OpenGL and would only "mostly" work, in that it was clearly missing the left/right normals or they had invalid values. Solved by switching to an Assimp-based loader.
The code, with Assimp linked in, looks like this:
void blInitResWAssimp() {
cout << "blInitResWAssimp" << endl;
blCreateModelViewProjectionMatrix();
//loads object
bool res = loadAssImp("Resources/RCSS-subdiv.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
//bool res = loadAssImp("Resources/cheb.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// Load it into a VBO
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_vertices.size() * sizeof(glm::vec3), &indexed_vertices[0], GL_STATIC_DRAW);
// Normal buffer
glGenBuffers(1, &normalbuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_normals.size() * sizeof(glm::vec3), &indexed_normals[0], GL_STATIC_DRAW);
// Generate a buffer for the indices as well
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW);
//ModelMatrix = ModelMatrix * glm::translate(glm::mat4(1.0f), glm::vec3(-0.5, -0.5, 0));
}
Assimp stuff
bool loadAssImp(
const char * path,
std::vector<unsigned short> & indices,
std::vector<glm::vec3> & vertices,
std::vector<glm::vec2> & uvs,
std::vector<glm::vec3> & normals
){
Assimp::Importer importer;
const aiScene* scene = importer.ReadFile(path, 0/*aiProcess_JoinIdenticalVertices | aiProcess_SortByPType*/);
if (!scene) {
fprintf(stderr, importer.GetErrorString());
getchar();
return false;
}
const aiMesh* mesh = scene->mMeshes[0]; // In this simple example code we always use the 1st mesh (in OBJ files there is often only one anyway)
const aiMaterial* material = scene->mMaterials[0];
// Fill vertices positions
vertices.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D pos = mesh->mVertices[i];
vertices.push_back(glm::vec3(pos.x, pos.y, pos.z));
}
// Fill vertices texture coordinates
/*
uvs.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D UVW = mesh->mTextureCoords[0][i]; // Assume only 1 set of UV coords; AssImp supports 8 UV sets.
uvs.push_back(glm::vec2(UVW.x, UVW.y));
}*/
// Fill vertices normals
normals.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D n = mesh->mNormals[i];
//aiVector3D n = mesh->mVertices[i];
normals.push_back(glm::vec3(n.x, n.y, n.z));
}
// Fill face indices
indices.reserve(3 * mesh->mNumFaces);
for (unsigned int i = 0; i<mesh->mNumFaces; i++){
// Assume the model has only triangles.
indices.push_back(mesh->mFaces[i].mIndices[0]);
indices.push_back(mesh->mFaces[i].mIndices[1]);
indices.push_back(mesh->mFaces[i].mIndices[2]);
}
// The "scene" pointer will be deleted automatically by "importer"
return true;
}