I'm trying to use glDrawElements to draw a model loaded with Assimp. The geometry displays fine, but textures don't show up on the model; I just get a black version of the model I've loaded.
Load Model Function
ModelInfo LoadModel(const std::string& modelPath){
    printf("Loading model: %s\n", modelPath.c_str());
    //verify that the file exists first
    std::ifstream fin(modelPath.c_str());
    if(!fin.fail()){
        fin.close();
    }else{
        throw std::runtime_error("could not open file " + modelPath);
    }
    Assimp::Importer importer;
    const aiScene* scene = importer.ReadFile(modelPath,
        aiProcessPreset_TargetRealtime_Fast |
        //aiProcess_CalcTangentSpace |
        aiProcess_Triangulate |
        aiProcess_GenSmoothNormals |
        aiProcess_FlipUVs
        //aiProcess_JoinIdenticalVertices |
        //aiProcess_SortByPType
        );
    if(!scene){
        throw std::runtime_error(importer.GetErrorString());
    }
    printf("imported %s\n", modelPath.c_str());
    fflush(stdout);
    std::vector<unsigned int> indices;
    std::vector<float> vertices;
    std::vector<float> uvs;
    std::vector<float> normals;
    aiMesh* mesh = scene->mMeshes[0];
    int numOfFaces = mesh->mNumFaces;
    int numOfIndices = numOfFaces * 3;
    indices.resize(numOfIndices);
    for(unsigned int i = 0; i < mesh->mNumFaces; ++i){
        const aiFace &face = mesh->mFaces[i];
        assert(face.mNumIndices == 3);
        indices[i * 3 + 0] = face.mIndices[0];
        indices[i * 3 + 1] = face.mIndices[1];
        indices[i * 3 + 2] = face.mIndices[2];
    }
    int numOfVertices = mesh->mNumVertices;
    vertices.resize(numOfVertices * 3);
    normals.resize(numOfVertices * 3);
    uvs.resize(numOfVertices * 2);
    for(unsigned int i = 0; i < mesh->mNumVertices; ++i){
        if(mesh->HasPositions()){
            vertices[i * 3 + 0] = mesh->mVertices[i].x;
            vertices[i * 3 + 1] = mesh->mVertices[i].y;
            vertices[i * 3 + 2] = mesh->mVertices[i].z;
            //printf("[ %f, %f, %f]\n",vertices[i*3+0],vertices[i*3+1],vertices[i*3+2]);
        }
        if(mesh->HasNormals()){
            normals[i * 3 + 0] = mesh->mNormals[i].x;
            normals[i * 3 + 1] = mesh->mNormals[i].y; //was .x: copy-paste typo
            normals[i * 3 + 2] = mesh->mNormals[i].z; //was .x: copy-paste typo
        }
        if(mesh->HasTextureCoords(0)){
            uvs[i * 2 + 0] = mesh->mTextureCoords[0][i].x;
            uvs[i * 2 + 1] = mesh->mTextureCoords[0][i].y;
            printf("[ %f, %f]\n", uvs[i*2+0], uvs[i*2+1]);
        }
    }
    //create vao
    GLuint vao;
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    //create element buffer
    GLuint elementBuffer;
    glGenBuffers(1, &elementBuffer);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementBuffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned int), &indices[0], GL_STATIC_DRAW);
    //create vertex buffer
    GLuint vertexBuffer;
    glGenBuffers(1, &vertexBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(float), &vertices[0], GL_STATIC_DRAW); //sizeof(float): the vector holds floats, not aiVector3D
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
    printf("vertices.size is %lu\n", vertices.size());
    printf("uvs.size is %lu\n", uvs.size());
    //create uv buffer
    GLuint uvBuffer;
    glGenBuffers(1, &uvBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
    glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(float), &uvs[0], GL_STATIC_DRAW); //sizeof(float), see above
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(float), (void*)0);
    //create normal buffer
    GLuint normalBuffer;
    glGenBuffers(1, &normalBuffer);
    glBindBuffer(GL_ARRAY_BUFFER, normalBuffer);
    glBufferData(GL_ARRAY_BUFFER, normals.size() * sizeof(float), &normals[0], GL_STATIC_DRAW); //sizeof(float), see above
    glEnableVertexAttribArray(2);
    glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
    ModelInfo retval;
    retval.vao = vao;
    retval.index = elementBuffer;
    retval.vertex = vertexBuffer;
    retval.uv = uvBuffer;
    retval.normal = normalBuffer;
    retval.count = numOfIndices;
    return retval;
}
Short overview of LoadModel: it loads the model with Assimp, copies the data from the first mesh in the Assimp scene into vectors, then creates a VAO and VBOs for geometry, texture coordinates, normals, and indices. The data is then loaded into the buffers from the vectors and the vertex attributes are set. Once everything is loaded, a struct containing several GLuints is created to return the data.
Render Model function
void render_model(ModelInfo info){
    cleanup();
    //set program
    glUseProgram(modelprogram);
    //set uniforms
    glm::mat4 view = camera.matrix();
    glUniformMatrix4fv(modelcamera, 1, GL_FALSE, &view[0][0]);
    glm::mat4 model = glm::mat4();
    model = glm::rotate(model, degree, glm::vec3(0,1,1));
    glUniformMatrix4fv(modelmodel, 1, GL_FALSE, &model[0][0]);
    glBindVertexArray(info.vao);
    glBindTexture(GL_TEXTURE_2D, texture);
    glUniform1i(modeltext, texture);
    glDrawElements(GL_TRIANGLES, info.count, GL_UNSIGNED_INT, (void*)0);
}
Brief overview of render_model: set the shader program that handles rendering the model, get the matrix for the camera and pass it to the shader, generate a matrix for the model, make it spin, and pass it to the shader. Bind the VAO of the model to be rendered. Bind a previously loaded texture, which is also used on other geometry in the program where it shows up with no problem, and pass it to the shader. With everything set, call glDrawElements starting from the first position in the index buffer of the current VAO. Once drawing is complete, do cleanup to unbind any buffers and arrays.
The shaders I'm using for this are very basic.
vertex shader
#version 140
uniform mat4 camera;
uniform mat4 model;
in vec3 position;
in vec2 uv;
out vec2 fragUV;
void main(){
    //pass variables on to fragment shader
    fragUV = uv;
    //vertex to draw
    gl_Position = camera * model * vec4(position,1);
}
fragment shader
#version 140
uniform sampler2D modeltext;
in vec2 fragUV;
out vec4 finalColor;
void main(){
    finalColor = texture(modeltext, fragUV);
}
All uniforms are loaded correctly with a function that verifies that something was actually loaded, and the texture is used and works elsewhere in the program. The model has texture coordinates. The geometry is loading and rendering with no problem.
You are passing the wrong value to the sampler uniform.
glBindTexture (GL_TEXTURE_2D, texture);
glUniform1i (modeltext, texture);
Sampler uniforms do not take the name (ID) of a texture object; they take the index of the Texture Image Unit that you bound your texture to. In this example you appear to be using the default Texture Image Unit, GL_TEXTURE0, so the value your sampler uniform should use is 0.
Because a sampler2D actually expects the Texture Image Unit and not the name of a texture, you never have to change this uniform when you change the bound texture. GLSL will always sample whatever texture is bound to the corresponding Texture Image Unit (assuming they have compatible types / states).
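For example, a minimal sketch with two textures (texA, texB and the two uniform locations are hypothetical names, not from the question):
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texA);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, texB);
glUniform1i(diffuseSamplerLoc, 0); // unit index 0, not the texture name texA
glUniform1i(normalSamplerLoc, 1);  // unit index 1, not the texture name texB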
This means that generally you set sampler uniforms once when you initialize your GLSL program and never touch it again. In newer versions of GLSL (4.20) you can even hard-code the Texture Image Unit that a sampler uses in the shader itself, but since you are using GLSL 1.40 you do not have this option unless the extension: GL_ARB_shading_language_420pack is supported.
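If that extension were available, the hard-coded binding would look something like this (a sketch, not part of the asker's code):
#version 140
#extension GL_ARB_shading_language_420pack : require
layout(binding = 0) uniform sampler2D modeltext; // tied to texture unit 0; no glUniform1i needed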
Consider the following code, which correctly demonstrates how to use sampler uniforms:
glActiveTexture (GL_TEXTURE0);
glBindTexture (GL_TEXTURE_2D, texture);
glUniform1i (modeltext, 0);
I have included the redundant call to glActiveTexture (...) merely to demonstrate how the uniform sampler and active texture unit are related... you do not pass the constant GL_TEXTURE0 to a sampler, instead you use the integer index. GL_TEXTURE0 is the default active texture unit.
Related
I have a very large set of points (~300k) derived from an RGB and depth image, and for the sake of obtaining geometric information I have calculated the normal vector for each of those points. However, I can't proceed to any further calculations until I'm certain the normal vectors are correct, so I decided to display each vector with a line:
glm::vec3 lvec = point + normal;
normals[index + 0] = point.x;
normals[index + 1] = point.y;
normals[index + 2] = point.z;
normals[index + 3] = lvec.x;
normals[index + 4] = lvec.y;
normals[index + 5] = lvec.z;
Here point is a vector containing each point's coordinates and normal is its normal vector. I store both ends of the line contiguously in an array and after buffering, I use GL_LINES to draw the data.
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, height * width * 6 * sizeof(GLfloat), normals, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
draw function:
glDisable(GL_PROGRAM_POINT_SIZE);
glLineWidth(3.0);
//binding shader program and setting uniform variables
glUseProgram(shader);
glUniformMatrix4fv(modelLocation, 1, GL_FALSE, &model[0][0]);
glUniformMatrix4fv(viewLocation, 1, GL_FALSE, &view[0][0]);
glUniformMatrix4fv(projectionLocation, 1, GL_FALSE, &projection[0][0]);
//binding vertex array object and drawing
glBindVertexArray(VAO);
glDrawArrays(GL_LINES, 0, width * height * 2);
vertex shader:
#version 330 core
layout(location = 0) in vec3 position;
uniform mat4 model, view, projection;
void main() {
    gl_Position = projection * view * model * vec4(position, 1.0);
}
fragment shader:
#version 330 core
out vec4 colour;
void main() {
    colour = vec4(1.0, 0.0, 0.0, 1.0);
}
Naturally, there are 300k lines being drawn and the result is too dense for me to understand whether they are correct or not. Is there a way I could randomly discard some of them? There is a single draw call that does the drawing, so I can't think of any way to use uniform variables to select which ones get drawn.
Thanks in advance.
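A possible sketch of one approach (an illustrative addition, not from the thread, assuming the GLSL 330 shaders above): consecutive vertex pairs form one line, so gl_VertexID / 2 identifies the line, and a cheap hash of that ID compared against a keepFraction uniform can collapse unwanted lines outside the clip volume, leaving only a random subset visible:
#version 330 core
layout(location = 0) in vec3 position;
uniform mat4 model, view, projection;
uniform float keepFraction; // e.g. 0.05 keeps roughly 5% of the lines
// cheap pseudo-random hash in [0,1), keyed on the line index
float hash(float n) { return fract(sin(n) * 43758.5453); }
void main() {
    int lineID = gl_VertexID / 2; // both endpoints of a line share one ID
    if (hash(float(lineID)) > keepFraction)
        gl_Position = vec4(2.0, 2.0, 2.0, 1.0); // outside the clip volume: line is clipped away
    else
        gl_Position = projection * view * model * vec4(position, 1.0);
}
Since both endpoints hash to the same value, a line is either kept or dropped as a whole, and only the single keepFraction uniform needs to be set from the CPU side.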
So I've been fiddling with an old University project done in OpenGL 3.3 (FreeGLUT + GLEW) and I've run into some problems.
Right at the start, I run the program and I'm getting an error compiling the BumpMap Fragment Shader:
#version 330 core
#define lightCount 10
in vec4 vertPos;
in vec4 eyeModel;
in vec3 normalInterp;
in vec4 ambient;
in vec4 color;
in vec4 spec;
in vec2 texuv;
in vec4 lightPosition[lightCount];
struct LightSource {
    vec4 Position;
    vec4 Direction;
    vec4 Color;
    float CutOff;
    float AmbientIntensity;
    float DiffuseIntensity;
    float SpecularIntensity;
    float ConstantAttenuation;
    float LinearAttenuation;
    float ExponentialAttenuation;
    int lightType;
};
layout(std140) uniform LightSources {
    LightSource lightSource[10];
};
uniform sampler2D diffuse_tex;
uniform sampler2D normal_tex;
out vec4 out_Color;
void main() {
    out_Color = vec4(0);
    for(int i = 0; i < lightCount; i++) {
        if(lightSource[i].lightType == 0)
            continue;
        vec3 NormalMap = texture2D(normal_tex, texuv).rgb;
        vec3 normal = normalize(NormalMap * 2.0 - 1.0); //normalize(normalInterp);
        vec4 LightDirection = vertPos - lightSource[i].Position;
        float Distance = length(LightDirection);
        LightDirection = normalize(LightDirection);
        vec4 ambientColor = ambient * lightSource[i].Color * lightSource[i].AmbientIntensity;
        vec4 diffuseColor = vec4(0, 0, 0, 0);
        vec4 dColor = texture2D(diffuse_tex, texuv);
        vec4 specularColor = vec4(0, 0, 0, 0);
        float DiffuseFactor = dot(normal, vec3(-LightDirection));
        if (DiffuseFactor > 0) {
            diffuseColor = dColor * lightSource[i].Color * lightSource[i].DiffuseIntensity * DiffuseFactor;
            vec3 VertexToEye = normalize(vec3(eyeModel - vertPos));
            vec3 LightReflect = normalize(reflect(vec3(LightDirection), normal));
            float SpecularFactor = dot(VertexToEye, LightReflect);
            SpecularFactor = pow(SpecularFactor, 255);
            if(SpecularFactor > 0.0){
                //SpecularFactor = pow( max(SpecularFactor,0.0), 255);
                specularColor = spec * lightSource[i].Color * lightSource[i].SpecularIntensity * SpecularFactor;
            }
        }
        out_Color += ambientColor + diffuseColor + specularColor;
    }
}
ERROR: 0:55: 'function' : is removed in Forward Compatible context texture2D
ERROR: 0:55: 'texture2D' : no matching overloaded function found (using implicit conversion)
So I looked the problem up, and even though I thought it was weird that I was getting this problem on a project I knew had been in working condition, I switched the texture2D call for a texture call. Now the shader compiles, but I get a different error when creating the buffer object for the first object in the scene:
//Consts defined here for readability
#define VERTICES 0
#define COLORS 1
#define NORMALS 2
#define TEXUVS 3
#define AMBIENTS 4
#define TANGENTS 5
#define SPECULARS 6
#define SPECULARS_CONSTANTS 7
#define NOISE_SCALE 8
void BufferObject::createBufferObject() {
    glGenVertexArrays(1, &_vertexArrayObjectID);
    glBindVertexArray(_vertexArrayObjectID);
    glGenBuffers(1, &_vertexBufferObjectID);
    glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
    glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
    glEnableVertexAttribArray(VERTICES);
    glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
    glEnableVertexAttribArray(COLORS);
    glVertexAttribPointer(COLORS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)sizeof(_vertices[0].XYZW));
    glEnableVertexAttribArray(NORMALS);
    glVertexAttribPointer(NORMALS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)));
    glEnableVertexAttribArray(TEXUVS);
    glVertexAttribPointer(TEXUVS, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)));
    glEnableVertexAttribArray(AMBIENTS);
    glVertexAttribPointer(AMBIENTS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)));
    glEnableVertexAttribArray(TANGENTS);
    glVertexAttribPointer(TANGENTS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)));
    glEnableVertexAttribArray(SPECULARS);
    glVertexAttribPointer(SPECULARS, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)+sizeof(_vertices[0].TANGENT)));
    glEnableVertexAttribArray(SPECULARS_CONSTANTS);
    glVertexAttribPointer(SPECULARS_CONSTANTS, 1, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)(sizeof(_vertices[0].XYZW)+sizeof(_vertices[0].RGBA)+sizeof(_vertices[0].NORMAL)+sizeof(_vertices[0].TEXUV)+sizeof(_vertices[0].AMBIENT)+sizeof(_vertices[0].TANGENT)+sizeof(_vertices[0].SPECULAR)));
    glBindVertexArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindBuffer(GL_UNIFORM_BUFFER, 0);
    glDisableVertexAttribArray(VERTICES);
    glDisableVertexAttribArray(COLORS);
    glDisableVertexAttribArray(NORMALS);
    glDisableVertexAttribArray(TEXUVS);
    glDisableVertexAttribArray(AMBIENTS);
    glDisableVertexAttribArray(TANGENTS);
    glDisableVertexAttribArray(SPECULARS);
    glDisableVertexAttribArray(SPECULARS_CONSTANTS);
    Utility::checkOpenGLError("ERROR: Buffer Object creation failed.");
}
OpenGL ERROR [Invalid Operation] = 1282
And that's all the info I'm getting. I've moved checkOpenGLError around and figured out that the line glDisableVertexAttribArray(VERTICES) is the one giving the error.
After a bit more digging I found out that you're not supposed to call glBindVertexArray(0) (at least not before glDisableVertexAttribArray; from what I remember, we set those bindings to 0 so we wouldn't accidentally affect anything we didn't want to).
At this point the error moves to where we're drawing one of the scene objects. I've hit a bit of a wall and don't know where to go next. I guess my question is whether there is some configuration that needs to be set when running the project, or whether just running this on a more recent graphics card could account for the different behaviour. As a final note, this is running on Windows out of Visual Studio 10 (or 15; I switched to 10 when I reverted all changes and didn't retarget the solution). The program configuration is as follows:
//GLUT Init
glutInit(&argc, argv);
glutInitContextVersion(3, 3);
glutInitContextFlags(GLUT_FORWARD_COMPATIBLE);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE,GLUT_ACTION_GLUTMAINLOOP_RETURNS);
glutInitWindowSize(windowWidth, windowHeight);
glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
windowHandle = glutCreateWindow(CAPTION);
//GLEW Init
glewExperimental = GL_TRUE;
GLenum result = glewInit();
//GLUT Init
std::cerr << "CONTEXT: OpenGL v" << glGetString(GL_VERSION) << std::endl;
glClearColor(0.1f, 0.1f, 0.1f, 1.0f);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LEQUAL);
glDepthMask(GL_TRUE);
glDepthRange(0.0,1.0);
glClearDepth(1.0);
glEnable(GL_CULL_FACE);
glCullFace(GL_BACK);
glFrontFace(GL_CCW);
with the context above being:
CONTEXT: OpenGL v3.3.0 - Build 22.20.16.4749
Let me know if any additional information is required; I didn't want to add any more unnecessary clutter, and the project is too big to just paste it all here...
In your shader you are using GLSL version 330 core, which means texture2D() is deprecated and you should use texture() instead.
As for your INVALID_OPERATION error, the problem is that you unbound the VAO with glBindVertexArray(0); and then called glDisableVertexAttribArray(VERTICES);, which operates on the currently bound VAO. You should move glBindVertexArray(0); below those calls.
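In code, the corrected ordering would look like this (a sketch using the constants from the question):
glDisableVertexAttribArray(VERTICES);
glDisableVertexAttribArray(COLORS);
// ...the remaining glDisableVertexAttribArray calls...
glBindVertexArray(0); // unbind the vao only after the attribute calls
glBindBuffer(GL_ARRAY_BUFFER, 0);
(Though, as the next answer explains, you likely don't want to disable those arrays at all.)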
First let me refer to the specification, OpenGL 4.6 API Core Profile Specification; 10.3.1 Vertex Array Objects; page 347 :
The name space for vertex array objects is the unsigned integers, with zero reserved by the GL.
...
A vertex array object is created by binding a name returned by GenVertexArray with the command
void BindVertexArray( uint array );
array is the vertex array object name. The resulting vertex array object is a new state vector, comprising all the state and with the same initial values listed in tables 23.3 and 23.4.
BindVertexArray may also be used to bind an existing vertex array object. If the bind is successful no change is made to the state of the bound vertex array object, and any previous binding is broken.
Table 23.3, Vertex Array Object State
VERTEX_ATTRIB_ARRAY_ENABLED, VERTEX_ATTRIB_ARRAY_SIZE, VERTEX_ATTRIB_ARRAY_STRIDE, VERTEX_ATTRIB_ARRAY_TYPE, VERTEX_ATTRIB_ARRAY_NORMALIZED, VERTEX_ATTRIB_ARRAY_INTEGER, VERTEX_ATTRIB_ARRAY_LONG, VERTEX_ATTRIB_ARRAY_DIVISOR, VERTEX_ATTRIB_ARRAY_POINTER
Table 23.4, Vertex Array Object State
ELEMENT_ARRAY_BUFFER_BINDING, VERTEX_ATTRIB_ARRAY_BUFFER_BINDING, VERTEX_ATTRIB_BINDING, VERTEX_ATTRIB_RELATIVE_OFFSET, VERTEX_BINDING_OFFSET, VERTEX_BINDING_STRIDE, VERTEX_BINDING_DIVISOR, VERTEX_BINDING_BUFFER.
This means that a vertex array object collects all the information necessary to draw an object: it stores the locations and formats of the vertex attributes.
Further, the vertex array object "knows" whether each attribute is enabled or disabled.
If you do
glBindVertexArray(0);
glDisableVertexAttribArray( .... );
this causes an INVALID_OPERATION error when you use a core profile OpenGL context, because vertex array object 0 is not a valid vertex array object there.
If you were using a compatibility profile context, this would not cause an error, because there vertex array object 0 is the default vertex array object and is valid.
If you do
glBindVertexArray(_vertexArrayObjectID);
glEnableVertexAttribArray( ..... );
glVertexAttribPointer( ..... );
glDisableVertexAttribArray( ..... );
glBindVertexArray(0);
then the draw call will fail. You have made the effort to define all the arrays of generic vertex attribute data and to enable them all correctly, but right after doing that you disable them again. So the "disabled" state for all the attributes is what ends up stored in the vertex array object.
The correct procedure to define a vertex array object is:
Generate the vertex buffers, and create and initialize the buffer object's data store (this step can also be done after creating and binding the vertex array object):
glGenBuffers(1, &_vertexBufferObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
Generate and bind the vertex array object:
glGenVertexArrays(1, &_vertexArrayObjectID);
glBindVertexArray(_vertexArrayObjectID);
Define and enable the arrays of generic vertex attribute data (this has to be done after binding the vertex array object):
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glEnableVertexAttribArray(VERTICES);
glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
....
glBindBuffer(GL_ARRAY_BUFFER, 0);
If you use an element buffer (GL_ELEMENT_ARRAY_BUFFER), then you have to specify it now, because the name of (reference to) the element buffer object is stored in the vertex array object, which has to be currently bound when the element buffer object gets bound.
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, ..... );
Finally you can call glBindVertexArray(0), but there is no reason to do so. It is sufficient to bind a new vertex array object before you specify a new mesh, or to bind the proper vertex array object before you draw a mesh.
Further, there is no need for glDisableVertexAttribArray as long as you don't want to change the vertex array object specification. The "enabled" state is stored in the vertex array object and kept there. If you bind a new vertex array object, then that object and all of its state become current.
Now drawing is simple:
glBindVertexArray(_vertexArrayObjectID);
glDrawArrays( .... );
Again, there is no need for glBindVertexArray(0) after the draw call (especially in a core profile context).
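Putting those steps together, a minimal sketch of setup and draw (names taken from the question's code; the remaining attributes follow the same pattern):
// create and fill the vertex buffer
glGenBuffers(1, &_vertexBufferObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertex)*_vertexCount, _vertices, GL_STATIC_DRAW);
// create the vertex array object and record the attribute specification in it
glGenVertexArrays(1, &_vertexArrayObjectID);
glBindVertexArray(_vertexArrayObjectID);
glBindBuffer(GL_ARRAY_BUFFER, _vertexBufferObjectID);
glEnableVertexAttribArray(VERTICES);
glVertexAttribPointer(VERTICES, 4, GL_FLOAT, GL_FALSE, sizeof(Vertex), 0);
// ...enable and specify the remaining attributes; do not disable them...
// draw: binding the vao restores everything recorded above
glBindVertexArray(_vertexArrayObjectID);
glDrawArrays(GL_TRIANGLES, 0, _vertexCount);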
I'm using the SIFT algorithm and I want to draw lines between keypoints in different images. I made it work, but all my lines have the same color, so the result is unreadable.
What I want to achieve is to assign a random color to each line, but one and only one color per line.
I have to use shaders to do that, so I send LINES and POINTS (that is, the colors) to the same shader, and I don't know what's wrong in my code (I get a crash when trying to execute it). EDIT: it is not just that something is wrong in my code (well, obviously yes...), but the error causes a crash, as if I had a segmentation fault. So I think my error is due to a wrong allocation for my color array (because it worked without this array).
My code:
std::vector<GLfloat> points;
std::vector<glm::vec3> colors;
GLuint VAO, VBO[2];
void setupLines() {
    glGenVertexArrays(1, &VAO);
    glGenBuffers(2, &VBO[0]);
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
    glBufferData(GL_ARRAY_BUFFER, points.size() * sizeof(GLfloat), &points[0], GL_STATIC_DRAW);
    glEnableVertexAttribArray(0);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
    glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
    glBufferData(GL_ARRAY_BUFFER, colors.size() * sizeof(glm::vec3), &colors[0], GL_STATIC_DRAW);
    glEnableVertexAttribArray(1);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
    glBindBuffer(GL_ARRAY_BUFFER,0);
    glBindVertexArray(0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
void draw() {
    glBindVertexArray(VAO);
    glBindBuffer(GL_ARRAY_BUFFER, VBO[0]);
    glDrawArrays(GL_LINES, 0, points.size());
    //The error occurs here, it seems...
    glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
    glDrawArrays(GL_POINTS, 0, colors.size());
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    glBindVertexArray(0);
}
//for each line (2 points), push the same color twice so both endpoints share it
void addColor() {
    for (int i = 0; i < points.size()/2; ++i) {
        float a = rand() / (float)RAND_MAX;
        float b = rand() / (float)RAND_MAX;
        float c = rand() / (float)RAND_MAX;
        colors.push_back(glm::vec3(a, b, c));
        colors.push_back(glm::vec3(a, b, c));
    }
}
and my vertex shader:
#version 330 core
layout (location = 0) in vec2 aTexCoord;
layout (location = 1) in vec3 color;
out vec2 TexCoord;
out vec3 Col;
void main()
{
    TexCoord = vec2(aTexCoord.xy);
    Col = color;
}
and then I use Col in the fragment shader for the color.
Is this how I have to do it?
You have to set the current position gl_Position in the vertex shader.
The vertex coordinate has to be an attribute:
in vec3 aVertCoord;
and you have to assign the coordinate to gl_Position:
gl_Position = vec4(aVertCoord.xyz, 1.0);
Note, for 2D coordinates it should be something like:
in vec2 aVertCoord;
void main()
{
    .....
    gl_Position = vec4(aVertCoord.xy, 0.0, 1.0);
}
In the end you have 2 vertex attributes: the vertex coordinates and the color. You do not need any texture coordinates, because you draw lines (GL_LINES). But I guess what you call aTexCoord is the vertex position, so you have to do it like this:
#version 330 core
layout (location = 0) in vec2 aTexCoord;
layout (location = 1) in vec3 color;
out vec2 TexCoord; // vec2: it is assigned a vec2 below
out vec3 Col;
void main()
{
    gl_Position = vec4(aTexCoord.xy, 0.0, 1.0);
    TexCoord = aTexCoord.xy;
    Col = color;
}
The vertex array object VAO stores the state of the generic vertex attributes (glVertexAttribPointer, glEnableVertexAttribArray). The vertex attribute state may refer to a vertex array buffer. You have to bind the vertex array object VAO only when you draw the object (the lines):
void draw() {
    glBindVertexArray(VAO);
    int numberOfPoints = points.size() / 2; // number of points, not the number of floats
    glDrawArrays(GL_LINES, 0, numberOfPoints);
    glBindVertexArray(0);
}
Note, it is sufficient to call glDrawArrays one time.
Further, the 1st parameter of glVertexAttribPointer is the attribute index:
glBindBuffer(GL_ARRAY_BUFFER, VBO[1]);
glBufferData(GL_ARRAY_BUFFER, colors.size() * sizeof(glm::vec3), &colors[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(
1, // <---------------------------- attribute index
3, GL_FLOAT, GL_FALSE, 0, (GLvoid*)0);
Since you need one color per vertex coordinate, but you want every line to be drawn in a single color, you have to do it like this:
void addColor()
{
    int numberOfPoints = points.size() / 2;
    for (int i = 0; i < numberOfPoints/2; ++i)
    {
        glm::vec3 col(
            rand() / (float)RAND_MAX,
            rand() / (float)RAND_MAX,
            rand() / (float)RAND_MAX);
        colors.push_back(col);
        colors.push_back(col);
    }
}
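A possible call order (a sketch; the surrounding program is not shown in the question):
// fill `points` from the SIFT keypoint pairs first, then:
addColor();   // one color per vertex, pushed twice per line
setupLines(); // uploads points and colors into the two VBOs
// ...each frame:
draw();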
Here is my shader program:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec3 vertexNormal_modelspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform mat3 blNormalMatrix;
uniform vec3 lightPos;
out vec4 forFragColor;
const vec3 diffuseColor = vec3(0.55, 0.09, 0.09);
void main(){
    // Output position of the vertex, in clip space : MVP * position
    gl_Position = MVP * vec4(vertexPosition_modelspace,1);
    vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * diffuseColor;
    // all following geometric computations are performed in the
    // camera coordinate system (aka eye coordinates)
    vec3 vertexNormal_cameraspace = (V*M*vec4(vertexNormal_modelspace,0)).xyz;
    vec4 vertexPosition_cameraspace4 = V*M* vec4(vertexPosition_modelspace,1);
    vec3 vertexPosition_cameraspace = vec3(vertexPosition_cameraspace4).xyz;
    vec3 lightDir = normalize(lightPos - vertexPosition_cameraspace);
    float lambertian = clamp(dot(lightDir,vertexNormal_cameraspace), 0.0,1.0);
    forFragColor = vec4(lambertian*diffuseColor , 1.0);
}
My problem is that this "worked" in the older OpenGL profile (it didn't even have a version number; I think it was around OpenGL 2.1 or so). The key change is that I originally had normal = gl_NormalMatrix * gl_Normal and things worked.
However, that was based on my professor's code, which I've updated to the 3.3+ core profile, and after fixing the deprecated functions I am now left with this:
https://drive.google.com/file/d/0B6oLZ_d7S-U7cVpkUXpVXzdaZEk/edit?usp=sharing is a link to the video of my program's behavior.
The light source should be a point light at (0,0,3) or so that shouldn't move, but it's not following a particularly logical behavioral pattern and I can't make sense of it.
I tried passing the inverse transpose of the model matrix and using it as a replacement normal matrix, but it wrecked my normals. So I don't know.
This was my normalMatrix:
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelMatrix)));
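For what it's worth, a sketch of how the shader side would consume it, assuming the matrix is built from the ModelView matrix (as the Display code below does) so the result lands in camera space (blNormalMatrix is the uniform already declared in the shader above):
vec3 vertexNormal_cameraspace = normalize(blNormalMatrix * vertexNormal_modelspace);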
Edit: Here is my Display code:
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
// Use our shader
glUseProgram(programID);
// Get our transformations iff we move the camera around.
glm::mat4 MyModelMatrix = ModelMatrix * thisTran * ThisRot;
MVP = ProjectionMatrix * ViewMatrix * MyModelMatrix;
glm::mat4 ModelView = ViewMatrix * MyModelMatrix;
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelView)));
glm::vec3 newLightPos = lightPos;
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &MyModelMatrix[0][0]);
glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);
glUniformMatrix3fv(BlNormalMatrix, 1, GL_FALSE, &MyNormalMatrix[0][0]); // mat3 needs glUniformMatrix3fv (was glUniformMatrix4fv)
glUniformMatrix4fv(BlRotations, 1, GL_FALSE, &ThisRot[0][0]);
glUniform3f(BlCamera, cameraLoc.x, cameraLoc.y, cameraLoc.z);
glUniform3f(lPosition, newLightPos.x,newLightPos.y,newLightPos.z);
// 1st attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2nd attribute buffer : normals
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glVertexAttribPointer(
1, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// draw object using OpenGL 3.3
glDrawArrays(GL_TRIANGLES, 0, vertices.size() );
The problem ultimately turned out to be an issue with the model loader provided by my professor: it was somehow incompatible with modern OpenGL and would only "mostly" work, in that it was clearly missing the left/right normals or they had invalid values. Solved by switching to an Assimp-based loader.
The code, with Assimp linked, is like this:
void blInitResWAssimp() {
    cout << "blInitResWAssimp" << endl;
    blCreateModelViewProjectionMatrix();
    //loads object
    bool res = loadAssImp("Resources/RCSS-subdiv.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
    //bool res = loadAssImp("Resources/cheb.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
    glGenVertexArrays(1, &VertexArrayID);
    glBindVertexArray(VertexArrayID);
    // Load it into a VBO
    glGenBuffers(1, &vertexbuffer);
    glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
    glBufferData(GL_ARRAY_BUFFER, indexed_vertices.size() * sizeof(glm::vec3), &indexed_vertices[0], GL_STATIC_DRAW);
    // Normal buffer
    glGenBuffers(1, &normalbuffer);
    glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
    glBufferData(GL_ARRAY_BUFFER, indexed_normals.size() * sizeof(glm::vec3), &indexed_normals[0], GL_STATIC_DRAW);
    // Generate a buffer for the indices as well
    glGenBuffers(1, &elementbuffer);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW);
    //ModelMatrix = ModelMatrix * glm::translate(glm::mat4(1.0f), glm::vec3(-0.5, -0.5, 0));
}
Assimp stuff
bool loadAssImp(
    const char * path,
    std::vector<unsigned short> & indices,
    std::vector<glm::vec3> & vertices,
    std::vector<glm::vec2> & uvs,
    std::vector<glm::vec3> & normals
){
    Assimp::Importer importer;
    const aiScene* scene = importer.ReadFile(path, 0 /*aiProcess_JoinIdenticalVertices | aiProcess_SortByPType*/);
    if (!scene) {
        fprintf(stderr, "%s\n", importer.GetErrorString()); //never pass a runtime string as the format argument
        getchar();
        return false;
    }
    const aiMesh* mesh = scene->mMeshes[0]; // In this simple example code we always use the first mesh (in OBJ files there is often only one anyway)
    const aiMaterial* material = scene->mMaterials[0];
    // Fill vertex positions
    vertices.reserve(mesh->mNumVertices);
    for (unsigned int i = 0; i < mesh->mNumVertices; i++){
        aiVector3D pos = mesh->mVertices[i];
        vertices.push_back(glm::vec3(pos.x, pos.y, pos.z));
    }
    // Fill vertex texture coordinates
    /*
    uvs.reserve(mesh->mNumVertices);
    for (unsigned int i = 0; i < mesh->mNumVertices; i++){
        aiVector3D UVW = mesh->mTextureCoords[0][i]; // Assume only 1 set of UV coords; AssImp supports 8 UV sets.
        uvs.push_back(glm::vec2(UVW.x, UVW.y));
    }*/
    // Fill vertex normals
    normals.reserve(mesh->mNumVertices);
    for (unsigned int i = 0; i < mesh->mNumVertices; i++){
        aiVector3D n = mesh->mNormals[i];
        //aiVector3D n = mesh->mVertices[i];
        normals.push_back(glm::vec3(n.x, n.y, n.z));
    }
    // Fill face indices
    indices.reserve(3 * mesh->mNumFaces);
    for (unsigned int i = 0; i < mesh->mNumFaces; i++){
        // Assume the model has only triangles.
        indices.push_back(mesh->mFaces[i].mIndices[0]);
        indices.push_back(mesh->mFaces[i].mIndices[1]);
        indices.push_back(mesh->mFaces[i].mIndices[2]);
    }
    // The "scene" pointer will be deleted automatically by "importer"
    return true; //was missing: the function is declared bool
}
I'm currently trying to set up GPU skinning (with GLSL) but it's not working the way I would like :) Actually it's not working at all. My mesh disappears when I try this GLSL code:
layout(location = 0) in vec3 vertexPos;
layout(location = 1) in vec2 vertexUv;
layout(location = 2) in vec3 vertexNor;
layout(location = 5) in ivec4 joints_influences;
layout(location = 6) in vec4 weights_influences;
uniform mat4 ViewProj, View, Proj, Model;
out vec3 vertexPosEye;
out vec3 vertexNorEye;
const int MAX_INFLUENCES = 4;
const int MAX_BONES = 50;
uniform mat4 animation_matrices[MAX_BONES];
uniform mat4 inv_bind_matrices[MAX_BONES];
void main()
{
    vertexPosEye = (View * Model * vec4(vertexPos, 1)).xyz; // Position
    vertexNorEye = (View * Model * vec4(vertexNor, 0)).xyz; // Normal matrix
    vec4 final_v = vec4(0, 0, 0, 0);
    for (int i = 0; i < MAX_INFLUENCES; i++)
    {
        vec4 v = vec4(vertexPos, 1)
            * inv_bind_matrices[joints_influences[i]]
            * animation_matrices[joints_influences[i]]
            * weights_influences[i];
        final_v += v;
    }
    gl_Position = ViewProj * Model * final_v;
}
when I try this:
gl_Position = ViewProj * Model * vertexPos;
My mesh is back :) but no animations anymore, of course...
Here's my application (C++) code where I set the VBO attributes:
// Vertex position
glGenBuffers(1, &buffer[0]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[0]);
glBufferData(GL_ARRAY_BUFFER, vertices.pos.size() * sizeof(bVector3), &vertices.pos[0], GL_STATIC_DRAW);
// Ibid for uv, normals, tangents and bitangents.
// Skinning : joints index
glGenBuffers(1, &buffer[5]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[5]);
glBufferData(GL_ARRAY_BUFFER, vertices.joints.size() * sizeof(SkinningJoints), &vertices.joints[0], GL_STATIC_DRAW);
// Skinning : weights
glGenBuffers(1, &buffer[6]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[6]);
glBufferData(GL_ARRAY_BUFFER, vertices.weights.size() * sizeof(SkinningWeights), &vertices.weights[0], GL_STATIC_DRAW);
// Indices
glGenBuffers(1, &buffer[7]);
glBindBuffer(GL_ARRAY_BUFFER, buffer[7]);
glBufferData(GL_ARRAY_BUFFER, vertices.indices.size() * sizeof(bUshort), &vertices.indices[0], GL_STATIC_DRAW);
In the main loop:
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(0));
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
// ...same for uv, normals, tangents and bitangents...
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribPointer(5, 4, GL_INT, GL_FALSE, 0, BUFFER_OFFSET(0));
glEnableVertexAttribArray(6);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(6));
glVertexAttribPointer(6, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m->GetBuffer(7));
glDrawElements(GL_TRIANGLES, m->vertices.indices.size(), GL_UNSIGNED_SHORT, BUFFER_OFFSET(0));
Here is my RenderingVertices struct (after Barr's recommendations):
struct RenderingVertices
{
    // std::vector<Vec3>
    vVec3 pos, nor, tang, btan;
    vVec2 uv;
    vUshort indices;
    vector<SkinningJoints> joints;
    vector<SkinningWeights> weights;
};
And here is my SkinningJoints struct:
struct SkinningJoints
{
    int j[MAX_BONES_PER_VERT];
    SkinningJoints(Vertex::Weights weights[MAX_BONES_PER_VERT])
    {
        for (bUint i = 0; i < MAX_BONES_PER_VERT; i++)
            j[i] = weights[i].jid;
    }
};
My SkinningWeights struct is almost the same, with an array of float instead of int.
Now when I try to debug the joint indices, weight values and the final vertex as colors, here is what I get:
// Shader
color_debug = joints_influences;
http://www.images-host.fr/view.php?img=00021800pop.jpg
color_debug = weights_influences;
http://www.images-host.fr/view.php?img=00021800pop2.jpg
Another interesting thing: when I try this:
vec4 pop = vec4(vertexPos, 1) * animation_matrices[1] * inv_bind_matrices[1] * 1.0;
gl_Position = ViewProj * Model * pop;
My whole mesh is actually rotating, which means that my animation_matrices uniform is good.
Can anyone see what I'm doing wrong here?
I finally got it working. For those who may be interested, here is what I was doing wrong :
When I send the joint indices array to GLSL, instead of doing this:
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribPointer(5, 4, GL_INT, GL_FALSE, 0, BUFFER_OFFSET(0));
I needed to do this:
glEnableVertexAttribArray(5);
glBindBuffer(GL_ARRAY_BUFFER, m->GetBuffer(5));
glVertexAttribIPointer(5, 4, GL_INT, 0, BUFFER_OFFSET(0));
You have to look closely to find the difference. Instead of calling glVertexAttribPointer(), I needed to call glVertexAttribIPointer() because the joint indices are ints: glVertexAttribPointer exposes integer data to the shader as floating point (converting the values), so an ivec4 attribute reads garbage, whereas glVertexAttribIPointer leaves the values as integers.
Hope this will help someone someday.
Did you try debugging your skinning attributes? Output the vertex weights as colors so that you can confirm you have meaningful values. If everything is black, you'll know where to look.
From a quick glance at your RenderingVertices I can spot a first problem: you are passing a vector of pointers to GL, which I don't think is what you want to do.
Most of the time you will limit skinning influences to 4 joint/weight pairs per vertex, so you can get away with a simple array (i.e. SkinningJoints joints[4];).
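A minimal sketch of that fixed-size per-vertex layout (a hypothetical struct for illustration, assuming 4 influences per vertex):
struct SkinnedVertex {
    int   joints[4];  // joint indices, fed to the shader via glVertexAttribIPointer
    float weights[4]; // matching weights, fed via glVertexAttribPointer
};
// the mesh then stores one contiguous std::vector<SkinnedVertex>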