I am following along with a YouTube tutorial on OpenGL (a graphics programming library). I've created classes called Vertex, ShapeData and ShapeGenerator. The overall idea is that this code will hold the data for any type of shape I come up with and want to display on screen. The problem is that my program crashes as soon as the first "delete[]" in the CleanUp() function of ShapeData.h is hit. Here is the relevant code:
Vertex.h
#pragma once
#include "GLM/glm.hpp"
class Vertex
{
public:
Vertex();
Vertex(glm::vec3 thePosition, glm::vec3 theColor);
glm::vec3 position;
glm::vec3 color;
};
Vertex.cpp
#include "Vertex.h"
Vertex::Vertex()
{}
Vertex::Vertex(glm::vec3 thePosition, glm::vec3 theColor) :
position(thePosition),
color(theColor)
{
}
ShapeData.h
#pragma once
#include "Vertex.h"
#include "GL/glew.h"
struct ShapeData
{
ShapeData() :
verticies(0), numberOfVerts(0),
indicies(0), numberOfIndicies(0)
{}
Vertex* verticies;
GLuint numberOfVerts;
GLushort* indicies;
GLuint numberOfIndicies;
GLsizeiptr VertexBufferSize() const
{
return numberOfVerts * sizeof(Vertex);
}
GLsizeiptr IndexBufferSize() const
{
return numberOfIndicies * sizeof(GLushort);
}
void CleanUp()
{
delete[] verticies;
delete[] indicies;
verticies = 0;
indicies = 0;
numberOfIndicies = 0;
numberOfVerts = 0;
}
};
ShapeGenerator.cpp
#include "ShapeGenerator.h"
ShapeData ShapeGenerator::MakeTriangle()
{
Vertex triangle[] = {
Vertex(glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f)),
Vertex(glm::vec3(-1.0f, -1.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f)),
Vertex(glm::vec3(1.0f, -1.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f))
};
ShapeData shapeData;
shapeData.numberOfVerts = sizeof(triangle) / sizeof(*triangle);
shapeData.verticies = new Vertex[shapeData.numberOfVerts];
memcpy(shapeData.verticies, triangle, sizeof(triangle));
shapeData.verticies = triangle;
GLushort indicies[] = { 0,1,2 };
shapeData.numberOfIndicies = sizeof(indicies) / sizeof(*indicies);
shapeData.indicies = new GLushort[shapeData.numberOfIndicies];
memcpy(shapeData.indicies, indicies, sizeof(indicies));
return shapeData;
}
I'm trying to create a triangle, and everything works fine as long as I don't call CleanUp() within main. Here is the portion of main where I'm calling CleanUp():
main.cpp
ShapeData triangle = ShapeGenerator::MakeTriangle();
GLuint bufferID;
glGenBuffers(1, &bufferID);
glBindBuffer(GL_ARRAY_BUFFER, bufferID);
glBufferData(GL_ARRAY_BUFFER, triangle.VertexBufferSize(), triangle.verticies, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, 0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(float) * 6, (char*)(sizeof(float) * 3));
GLuint indexBufferID;
glGenBuffers(1, &indexBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, triangle.IndexBufferSize(),triangle.indicies, GL_STATIC_DRAW);
triangle.CleanUp();
You are replacing the new[]'d pointer here:
shapeData.verticies = new Vertex[shapeData.numberOfVerts];
memcpy(shapeData.verticies, triangle, sizeof(triangle));
shapeData.verticies = triangle;
The third line overwrites the heap pointer you just allocated with the address of the local stack array triangle. CleanUp() then calls delete[] on a pointer that was never returned by new[], which crashes (and the original heap allocation is leaked). The fix is to remove the line shapeData.verticies = triangle;.
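With that line removed, MakeTriangle looks like this (unchanged apart from dropping the stray assignment; the comment marks where it was):
ShapeData ShapeGenerator::MakeTriangle()
{
    Vertex triangle[] = {
        Vertex(glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f)),
        Vertex(glm::vec3(-1.0f, -1.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f)),
        Vertex(glm::vec3(1.0f, -1.0f, 0.0f), glm::vec3(1.0f, 0.0f, 0.0f))
    };
    ShapeData shapeData;
    shapeData.numberOfVerts = sizeof(triangle) / sizeof(*triangle);
    shapeData.verticies = new Vertex[shapeData.numberOfVerts];
    memcpy(shapeData.verticies, triangle, sizeof(triangle));
    // (stray "shapeData.verticies = triangle;" removed here)
    GLushort indicies[] = { 0, 1, 2 };
    shapeData.numberOfIndicies = sizeof(indicies) / sizeof(*indicies);
    shapeData.indicies = new GLushort[shapeData.numberOfIndicies];
    memcpy(shapeData.indicies, indicies, sizeof(indicies));
    return shapeData;
}
As a side note, ShapeData owns raw pointers but defines no copy constructor or assignment operator (the rule of three); copying a ShapeData and calling CleanUp() on both copies would delete the same memory twice. The manual CleanUp() pattern works here only because it is called exactly once per allocation.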
I'm trying to encapsulate OpenGL rendering in a renderer class.
I'm pretty sure my window class works fine (I've checked for errors), but I think the error is in my sprite class, though I have no idea what it is.
This is my sprite class:
sprite_renderer.hpp
#ifndef SPRITE_RENDERER_H
#define SPRITE_RENDERER_H
#include <GL/glew.h>
#include "shader.hpp"
class SpriteRenderer
{
public:
SpriteRenderer(Shader &shader);
~SpriteRenderer();
void DrawSprite();
private:
Shader shader;
GLuint quadVAO;
void initRenderData();
};
#endif
sprite_renderer.cpp
#include "sprite_renderer.hpp"
SpriteRenderer::SpriteRenderer(Shader &shader)
{
this->shader = shader;
this->initRenderData();
}
SpriteRenderer::~SpriteRenderer()
{
glDeleteVertexArrays(1, &this->quadVAO);
}
void SpriteRenderer::DrawSprite()
{
this->shader.Use();
glBindVertexArray(this->quadVAO);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindVertexArray(0);
}
void SpriteRenderer::initRenderData()
{
GLuint VBO;
GLfloat vertices[] = {
// Pos
0.0f, 1.0f,
1.0f, 0.0f,
0.0f, 0.0f,
0.0f, 1.0f,
1.0f, 1.0f,
1.0f, 0.0f,
};
glGenVertexArrays(1, &this->quadVAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindVertexArray(this->quadVAO);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 2 * sizeof(GLfloat), (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
I'm using it like this in my main.cpp:
int main()
{
Window window = Window();
Shader shader = ResourceManager::LoadShader("./assets/shaders/sprite.vert", "./assets/shaders/sprite.frag", "sprite");
SpriteRenderer* renderer = new SpriteRenderer(shader);
while (window.IsOpen())
{
window.BeginDraw();
renderer->DrawSprite();
window.EndDraw();
}
glfwTerminate();
return 0;
}
Where window.BeginDraw() and window.EndDraw() are just:
void Window::BeginDraw()
{
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
}
void Window::EndDraw()
{
glfwSwapBuffers(window);
glfwPollEvents();
}
Just to rule out the obvious, try sticking a print statement in the destructor of your Shader class. Judging by the design of your Shader class, you should find it extremely easy to end up calling glDeleteProgram prior to actually using the shader.
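Something as simple as this is enough, sketched under the assumption that your Shader wrapper stores its GL handle in a member called programID (a guess, since the class isn't shown):
#include <iostream>
Shader::~Shader()
{
    std::cout << "~Shader() destroying program " << programID << std::endl;
    glDeleteProgram(programID);
}
If that line prints before your first DrawSprite() call, a temporary copy of the shader has already destroyed the program you are about to use.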
In general, if you have a wrapper around a GLSL shader or program, you probably want to delete the copy constructor and assignment operator (or implement a ref counting system).
Edit: to delete the copy constructor & assignment operator:
Shader(const Shader&) = delete;
Shader& operator = (const Shader&) = delete;
You probably want to manage these shaders somewhere (possibly ref counted), and only store pointers to them within your SpriteRenderer.
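A sketch of that arrangement (all names here are illustrative, not taken from your code, and it assumes the GL handle lives in a programID member):
class Shader
{
public:
    Shader() : programID(0) {}
    ~Shader() { if (programID) glDeleteProgram(programID); }
    Shader(const Shader&) = delete;            // no accidental copies...
    Shader& operator=(const Shader&) = delete; // ...so no double glDeleteProgram
    void Use() const { glUseProgram(programID); }
    GLuint programID;
};

class SpriteRenderer
{
public:
    explicit SpriteRenderer(Shader* shader) : shader(shader) {}
    void DrawSprite() { shader->Use(); /* bind VAO and draw as before */ }
private:
    Shader* shader; // non-owning: whoever manages the shaders keeps it alive
};
If you'd rather not track ownership by hand, storing std::shared_ptr<Shader> instead of a raw pointer gives you the ref-counted variant.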
I can't see any other problem with the code you've posted above, so the issue may be within your shader, or how you are passing any uniform variables you're using.
I am programming a Super-Hyper-Mega-Lydian-Über-3D-Game and I am struggling to draw multiple geometries one after another.
Drawing a single geometry works fine; however, if I try to draw more than one, the geometries swap their vertex data and their shader programs, or they swap positions and rotation axes. This is how it looks if I draw one cube:
http://i.imgur.com/zmTPc0h.png
This is how it looks if I draw a spaceship after that cube:
http://i.imgur.com/10HDjm9.png
Here is my code for this application:
void MainLoop::loop()
{
VisibleObject cube("cube.obj", "vertexShader.glsl", "fragmentShaderCube.glsl");
cube.coordinates = glm::vec3(0.0f, 5.0f, -10.0f);
VisibleObject spaceship("spaceship.obj", "vertexShader.glsl", "fragmentShader.glsl");
spaceship.actualAxisOfRotation = glm::vec3(1.0f, 0.0f, 0.0f);
while (gameState != GAMESTATE_EXITING)
{
//check for input
while (SDL_PollEvent(&evnt))
{
switch (evnt.type)
{
case SDL_QUIT:
gameState = GAMESTATE_EXITING;
break;
default:
break;
}
}
//clear depth buffer
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
spaceship.draw();
cube.draw();
//display result
SDL_GL_SwapWindow(window);
}
}
class VisibleObject
{
public:
VisibleObject(std::string modelPath, std::string vertexShaderPath, std::string fragmentShaderPath);
~VisibleObject();
void cleanup();
void draw();
glm::vec3 coordinates; //1.0f = 1.0 meter
glm::vec3 actualAxisOfRotation;
GLfloat actualRotation; //radians off the originposition
glm::vec3 velocity; //meters per second
glm::vec3 axisOfRotation;
GLfloat rotationSpeed; //radians per second
private:
short numberOfIndices;
std::vector<short> indices;
short numberOfVertices;
std::vector<glm::vec3> vertices;
glm::mat4 fullMatrix;
GLuint fullMatrixUniformLocation;
GLuint shaderProgramID;
GLuint vertexBufferID;
GLuint indexBufferID;
GLuint vertexArrayObjectID;
};
VisibleObject::VisibleObject(std::string modelPath, std::string vertexShaderPath, std::string fragmentShaderPath)
{
coordinates = glm::vec3(0.0f, 0.0f, -10.0f);
actualAxisOfRotation = glm::vec3(0.0f, 1.0f, 0.0f);
actualRotation = 2.0f;
velocity = glm::vec3(0.0f, 0.0f, 0.0f);
axisOfRotation = glm::vec3(0.0f, 1.0f, 0.0f);
rotationSpeed = 0.0f;
//create shader-program
shaderProgramID = GlslUtilities::installShader(vertexShaderPath, fragmentShaderPath);
//create transformation-matrix-uniform
fullMatrixUniformLocation = glGetUniformLocation(shaderProgramID, "f_matrix");
//load model data
numberOfIndices = 0;
numberOfVertices = 0;
indices.clear();
vertices.clear();
something_that_works_and_loads_the_vertex_and_index_data_from_an_obj-file_into_the_vectors();
//sending data to opengl
glGenVertexArrays(1, &vertexArrayObjectID);
glGenBuffers(1, &indexBufferID);
glGenBuffers(1, &vertexBufferID);
glBindVertexArray(vertexArrayObjectID);
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferID);
glBufferData(GL_ARRAY_BUFFER, vertices.size()*sizeof(vertices[0]), &vertices[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(glm::vec3), 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(indices[0]), &indices[0], GL_STATIC_DRAW);
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
}
VisibleObject::~VisibleObject()
{
}
void VisibleObject::cleanup()
{
glUseProgram(0);
glDeleteProgram(shaderProgramID);
glDeleteBuffers(1, &vertexBufferID);
glDeleteBuffers(1, &indexBufferID);
glDeleteVertexArrays(1, &vertexArrayObjectID);
}
void VisibleObject::draw()
{
glBindVertexArray(vertexArrayObjectID);
fullMatrix = glm::perspective(glm::radians(85.0f), 800.0f / 600, 0.1f, 150.0f);
fullMatrix = glm::translate(fullMatrix, coordinates);
fullMatrix = glm::rotate(fullMatrix, glm::radians(((float)SDL_GetTicks())/10), actualAxisOfRotation);
glUniformMatrix4fv(fullMatrixUniformLocation, 1, GL_FALSE, &fullMatrix[0][0]);
glUseProgram(shaderProgramID);
glDrawElements(GL_TRIANGLES, numberOfIndices, GL_UNSIGNED_SHORT, 0);
}
You are uploading fullMatrix to the shader before binding the shader program. Since glUniformMatrix4fv always operates on the currently bound program, you are uploading the matrices to the wrong program and drawing with stale ones.
Solution: call the functions in the correct order:
glUseProgram(shaderProgramID);
glUniformMatrix4fv(fullMatrixUniformLocation, 1, GL_FALSE, &fullMatrix[0][0]);
glDrawElements(GL_TRIANGLES, numberOfIndices, GL_UNSIGNED_SHORT, 0);
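Applied to the draw() method from the question, the corrected body is identical apart from moving glUseProgram ahead of the uniform upload:
void VisibleObject::draw()
{
    glBindVertexArray(vertexArrayObjectID);
    fullMatrix = glm::perspective(glm::radians(85.0f), 800.0f / 600, 0.1f, 150.0f);
    fullMatrix = glm::translate(fullMatrix, coordinates);
    fullMatrix = glm::rotate(fullMatrix, glm::radians(((float)SDL_GetTicks()) / 10), actualAxisOfRotation);
    glUseProgram(shaderProgramID); // bind the program first...
    glUniformMatrix4fv(fullMatrixUniformLocation, 1, GL_FALSE, &fullMatrix[0][0]); // ...then set its uniforms
    glDrawElements(GL_TRIANGLES, numberOfIndices, GL_UNSIGNED_SHORT, 0);
}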
I have been fiddling around with making a game/rendering engine, and I have found that I can have a class for a shader object, but if I wrap a VAO in a class, it won't render.
The shaders return no errors, and the VAO and shaders are valid OpenGL objects.
UPDATE
The problem is this line:
glBufferData(GL_ARRAY_BUFFER, sizeof(arrFVertex), arrFVertex, GL_STATIC_DRAW);
As @BDL suggested in the comments, I thought about it and realized it should be:
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * intNumVertex * 3, arrFVertex, GL_STATIC_DRAW);
UPDATE 2
In response to being put on hold, here is a Minimal, Complete, and Verifiable example:
#include <OpenGL/gl3.h>
#include <SDL2/SDL.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
SDL_Window *window = NULL;
SDL_GLContext openGLRenderer;
bool bolRunning = true;
int intGLVersionMajor, intGLVersionMinor;
GLfloat arrFVertex[] = {
0.5f, 0.5f, 0.0f, // Top Right
0.5f, -0.5f, 0.0f, // Bottom Right
-0.5f, 0.5f, 0.0f, // Top Left
0.5f, -0.5f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, // Bottom Left
-0.5f, 0.5f, 0.0f // Top Left
};
GLuint intVAO;
GLuint intVBO;
GLuint intShaderAttribPosition;
GLuint intShaderProgram;
GLuint intNumVertex = 6;
void loadShaders(const char *strVertexShaderSource, const char *strFragmentShaderSource) {
intShaderProgram = glCreateProgram();
GLuint intVertexShader;
intVertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(intVertexShader, 1, &strVertexShaderSource, NULL);
glCompileShader(intVertexShader);
GLuint intFragmentShader;
intFragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(intFragmentShader, 1, &strFragmentShaderSource, NULL);
glCompileShader(intFragmentShader);
glAttachShader(intShaderProgram, intVertexShader);
glAttachShader(intShaderProgram, intFragmentShader);
glLinkProgram(intShaderProgram);
glDeleteShader(intVertexShader);
glDeleteShader(intFragmentShader);
}
void buildVAO(GLfloat *arrFVertex) {
intShaderAttribPosition = glGetAttribLocation(intShaderProgram, "f3Position");
glGenVertexArrays(1, &intVAO);
glBindVertexArray(intVAO);
glGenBuffers(1, &intVBO);
glBindBuffer(GL_ARRAY_BUFFER, intVBO);
glVertexAttribPointer(intShaderAttribPosition, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), (GLvoid *)0);
glEnableVertexAttribArray(intShaderAttribPosition);
glBufferData(GL_ARRAY_BUFFER, sizeof(arrFVertex), arrFVertex, GL_STATIC_DRAW);
glBindVertexArray(0);
}
int main(int argc, char **argv) {
SDL_Init(SDL_INIT_EVERYTHING);
window = SDL_CreateWindow("GSEngine",
SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
640, 480,
SDL_WINDOW_OPENGL);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 3);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_CORE);
if (window == NULL) {
printf("Could not create window: %s\n", SDL_GetError());
exit(1);
}
openGLRenderer = SDL_GL_CreateContext(window);
SDL_GL_MakeCurrent(window, openGLRenderer);
glViewport(0, 0, 640, 480);
loadShaders("#version 330 core\n\
in vec3 f3Position;\n\
void main() {\n\
gl_Position = vec4(f3Position, 1.0);\n\
}", "#version 330 core\n\
out vec4 f4Color;\n\
void main() {\n\
f4Color = vec4(1.0f, 0.5f, 0.2f, 1.0f);\n\
}");
buildVAO(arrFVertex);
while (bolRunning) {
SDL_Event event;
while (SDL_PollEvent(&event)) {
if (event.type == SDL_QUIT) {
bolRunning = false;
}
}
SDL_GL_MakeCurrent(window, openGLRenderer);
glClearColor(0.0f, 0.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(intShaderProgram);
glDrawArrays(GL_TRIANGLES, 0, intNumVertex);
SDL_GL_SwapWindow(window);
}
glDeleteBuffers(1, &intVBO);
glDeleteVertexArrays(1, &intVAO);
glDeleteProgram(intShaderProgram);
SDL_GL_DeleteContext(openGLRenderer);
SDL_DestroyWindow(window);
SDL_Quit();
return 0;
}
The problem has nothing to do with the VAO, but with the VBO. Since you pass a pointer to the function:
void GSMesh::build(GLfloat *arrFVertex, GSShader *shader, int _intNumVertex)
{
glBufferData(GL_ARRAY_BUFFER, sizeof(arrFVertex), arrFVertex, GL_STATIC_DRAW);
}
sizeof(arrFVertex) equals sizeof(GLfloat*), which is the size of the pointer, not the size of the array it points to. The correct code will look like this:
glBufferData(GL_ARRAY_BUFFER,
sizeof(GLfloat) * _intNumVertex * 3, arrFVertex,
GL_STATIC_DRAW);
In general I have to add that this is not the way questions should be asked on SO. It would have been good if you had included at least the relevant parts of the code in your question.
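One way to rule out this class of bug is to never let the array decay before its size is taken: either pass the element count explicitly, or take the array by reference so the compiler deduces its length. A sketch (the function name uploadVertices is illustrative, not from the posted code):
#include <GL/glew.h>
#include <cstddef>

// Variant 1: the caller passes the count, computed where the array type is still known.
void uploadVertices(const GLfloat *data, std::size_t count)
{
    glBufferData(GL_ARRAY_BUFFER, count * sizeof(GLfloat), data, GL_STATIC_DRAW);
}

// Variant 2: a reference to the array itself, so sizeof(data) is the full array size.
template <std::size_t N>
void uploadVertices(const GLfloat (&data)[N])
{
    glBufferData(GL_ARRAY_BUFFER, sizeof(data), data, GL_STATIC_DRAW);
}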
Despite what the spec says, with some drivers you must bind the shader program before you can get the location of an attribute or uniform. This may be what is causing your problems.
In your code that would mean in your GSMesh::build method adding:
shader->use();
Before:
intShaderAttribPosition = glGetAttribLocation(shader->intShaderProgram, "f3Position");
Personally, if the version of OpenGL you are using supports explicit attribute locations (layout qualifiers), I'd use those instead.
In the vertex shader you could have something like:
layout (location = 0) in vec3 position;
layout (location = 1) in vec2 tex_coords;
And then in your mesh class all you need is:
struct Vertex
{
glm::vec3 position;
glm::vec2 tex_coords;
};
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)offsetof(Vertex, position));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(Vertex), (GLvoid*)offsetof(Vertex, tex_coords));
I have fixed it; the working solution can be found at the bottom of the post.
I am trying to play with an interleaved VBO and a VAO. I am testing it on a simple triangle; this is what my index data and vertex data look like:
vertex_col data[] = {
vertex_col(vec3(0.5f, 0.5f, 0.5f), vec4(0.0f, 0.0f, 1.0f, 1.0f), vec3(0.0f, 0.0f, 0.0f)),
vertex_col(vec3(-0.5f, -0.5f, -0.5f), vec4(0.0f, 1.0f, 0.0f, 1.0f), vec3(0.0f, 0.0f, 0.0f)),
vertex_col(vec3(-0.5f, 0.5f, 0.5f), vec4(1.0f, 0.0f, 0.0f, 1.0f), vec3(0.0f, 0.0f, 0.0f)),
};
GLushort indices[] = {
0, 1, 2
};
ModelLoader loader;
model = loader.loadToVAO(indices, 3, data, 3);
This is vertex_col struct:
struct vertex_col {
public:
vertex_col(const vec3& pos, const vec4& col, const vec3& normal) {
this->pos = pos;
this->colour = col;
this->normal = normal;
}
vec3 pos;
vec4 colour;
vec3 normal;
};
This is how I load the data into vao and vbo:
RawModel ModelLoader::loadToVAO(const GLushort *indices, int m, const vertex_col *vertices, int n) {
GLuint vaoID = createVAO();
GLuint vboID;
glGenBuffers(1, &vboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glBufferData(GL_ARRAY_BUFFER, n * sizeof(vertex_col), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_col), (void*) offsetof(vertex_col, pos));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, sizeof(vertex_col), (void*) offsetof(vertex_col, colour));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_col), (void*) offsetof(vertex_col, normal));
GLuint vboID_2;
glGenBuffers(1, &vboID_2);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID_2);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, m * sizeof(GLushort), indices, GL_STATIC_DRAW);
unbindVAO();
return RawModel(vaoID, m);
}
GLuint ModelLoader::createVAO() {
GLuint vaoID;
glGenVertexArrays(1, &vaoID);
glBindVertexArray(vaoID);
return vaoID;
}
And this is how I draw the vao:
void Renderer::render(const RawModel &model) {
shaders["basic"].use();
glBindVertexArray(model.getVaoID());
glDrawElements(GL_TRIANGLES, model.getVertexCount(), GL_UNSIGNED_SHORT, nullptr);
glBindVertexArray(0);
shaders["basic"].release();
}
The last operation causes the error 'Access violation reading location 0x00000000' and I have no idea why. Also, in case it is relevant to the question, I am using the GLEW and GLFW libraries.
Fixed version:
RawModel *ModelLoader::loadToVAO(const GLushort *indices, int m, const vertex_col *vertices, int n) {
GLuint vaoID = createVAO();
GLuint vboID = 0;
glGenBuffers(1, &vboID);
glBindBuffer(GL_ARRAY_BUFFER, vboID);
glBufferData(GL_ARRAY_BUFFER, n * sizeof(vertex_col), vertices, GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_col), (void*) offsetof(vertex_col, pos));
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 4, GL_FLOAT, GL_FALSE, sizeof(vertex_col), (void*) offsetof(vertex_col, colour));
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(vertex_col), (void*) offsetof(vertex_col, normal));
vboID = 0;
glGenBuffers(1, &vboID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, vboID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, m * sizeof(GLushort), indices, GL_STATIC_DRAW);
unbindVAO();
return new RawModel(vaoID, m);
}
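A note on why the change from returning RawModel by value to returning a heap-allocated RawModel* can matter: if RawModel's destructor releases GL resources such as the VAO (the class isn't shown, so this is an assumption), then returning by value destroys a temporary and frees the VAO while the returned copy still refers to it, so the later glDrawElements reads through a dead object. A move-only wrapper keeps value semantics without that hazard; a sketch under the same assumption:
class RawModel
{
public:
    RawModel(GLuint vaoID, int vertexCount)
        : vaoID(vaoID), vertexCount(vertexCount) {}
    RawModel(const RawModel&) = delete;
    RawModel& operator=(const RawModel&) = delete;
    RawModel(RawModel&& other) noexcept
        : vaoID(other.vaoID), vertexCount(other.vertexCount)
    {
        other.vaoID = 0; // the moved-from object no longer owns the VAO
    }
    ~RawModel()
    {
        if (vaoID) glDeleteVertexArrays(1, &vaoID);
    }
    GLuint getVaoID() const { return vaoID; }
    int getVertexCount() const { return vertexCount; }
private:
    GLuint vaoID;
    int vertexCount;
};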
The Problem:
It seems as if the second GLuint buffer is not being read in properly.
Update: So the problem must be in how I feed the data to the shader. I rewrote the code (the old code is still below) to use swizzling for the index parameter. That was the only way I could get it to work. I would like to use multiple glVertexAttribPointer calls, but every time I try it I get the same undefined results.
What I Am Trying To Do:
I'm testing out very simple skinned animation with a very simplified shader:
#version 330 core
in vec2 model;
in uint jointID;
const int MAX_JOINTS = 10;
uniform mat4 joints[MAX_JOINTS];
void main()
{
gl_Position = joints[jointID] * vec4(model, 0.0f, 1.0f);
}
I input some simple data:
const GLfloat vertices[] =
{
// upper arm
0.0f, 0.0f,
0.4f, 0.0f,
0.4f, 0.2f,
0.0f, 0.0f,
0.4f, 0.2f,
0.0f, 0.2f,
// lower arm
0.4f, 0.0f,
0.8f, 0.0f,
0.8f, 0.2f,
0.4f, 0.0f,
0.8f, 0.2f,
0.4f, 0.2f
};
const GLuint indices[] =
{
// upper arm
0,
1,
1,
0,
1,
0,
// lower arm
1,
1,
1,
1,
1,
1
};
(The first array contains the vertices and the second array contains the corresponding boneIDs.) Oddly, the boneIDs never seem to equal 1, because when I set the matrix at index 1 to some really funky value, the vertices remain untransformed. This leads me to believe that the problem is in the way I set up my glVertexAttribPointer calls:
void SkinnedModel::draw()
{
shaderProgram.use();
glEnableVertexAttribArray(modelLoc);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glVertexAttribPointer(modelLoc, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(jointIDLoc);
glBindBuffer(GL_ARRAY_BUFFER, indexBuffer);
glVertexAttribPointer(jointIDLoc, 1, GL_UNSIGNED_INT, GL_FALSE, 0, NULL);
glUniformMatrix4fv(jointsLoc, 10, GL_FALSE, (GLfloat*)&poseMatrices);
glDrawArrays(GL_TRIANGLES, 0, numVertices);
glDisableVertexAttribArray(modelLoc);
glDisableVertexAttribArray(jointIDLoc);
}
I've been banging my head against the desk for the past few hours looking at what seems to be correct code. Anyway, it's probably something dumb I missed. Any help is appreciated.
Here is all the relevant source code (just in case):
SkinnedModel.h
#pragma once
#include "stl/DataTypes.h"
#include "Shader.h"
#include <Dense>
using namespace Eigen;
struct Joint
{
Joint** children;
Joint* parent;
U32 index;
};
class SkinnedModel
{
public:
static void init();
static void destroy();
SkinnedModel();
~SkinnedModel();
void create(const GLfloat* vertices, const GLuint* jointIndices, GLint numVertices, Joint* rootJoint);
void draw();
void rotate(Joint* joint, F32 angle, F32 x, F32 y);
GLuint vertexBuffer;
GLuint indexBuffer;
GLint numVertices;
//GLint numJoints;
Joint* root;
Matrix<GLfloat,4,4> poseMatrices[10];
static ShaderProgram shaderProgram;
static GLuint modelLoc;
static GLuint jointIDLoc;
static GLuint modelViewMatrixLoc;
static GLuint jointsLoc;
};
SkinnedModel.cpp
#include "SkinnedModel.h"
ShaderProgram SkinnedModel::shaderProgram;
GLuint SkinnedModel::modelLoc = -1;
GLuint SkinnedModel::jointIDLoc = -1;
GLuint SkinnedModel::modelViewMatrixLoc = -1;
GLuint SkinnedModel::jointsLoc = -1;
void SkinnedModel::init()
{
ShaderProgramSettings shaderPS;
shaderPS.loadShader("Skinned.v.glsl", ShaderType::VERTEX);
shaderPS.loadShader("Skinned.f.glsl", ShaderType::FRAGMENT);
shaderProgram = shaderPS.create();
shaderProgram.use();
modelLoc = shaderProgram.getAttrib("model");
jointIDLoc = shaderProgram.getAttrib("jointID");
//modelViewMatrixLoc = shaderProgram.getUniform("modelViewMatrix");
jointsLoc = shaderProgram.getUniform("joints");
}
void SkinnedModel::destroy()
{
shaderProgram.destroy();
}
SkinnedModel::SkinnedModel()
{
}
SkinnedModel::~SkinnedModel()
{
glDeleteBuffers(1, &vertexBuffer);
glDeleteBuffers(1, &indexBuffer);
}
void SkinnedModel::create(const GLfloat* vertices, const GLuint* jointIndices, GLint numVertices, Joint* rootJoint)
{
this->numVertices = numVertices;
this->root = rootJoint;
for(U32 i=0;i<10;++i)
{
poseMatrices[i] = Matrix<GLfloat,4,4>::Identity();
}
poseMatrices[1] = Matrix<GLfloat,4,4>::Zero(); // <--- This should mess it up!
// Creating buffers
glDeleteBuffers(1, &vertexBuffer);
glGenBuffers(1, &vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, numVertices*2*sizeof(GLfloat), vertices, GL_STATIC_DRAW);
glDeleteBuffers(1, &indexBuffer);
glGenBuffers(1, &indexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, indexBuffer);
glBufferData(GL_ARRAY_BUFFER, numVertices*sizeof(GLuint), jointIndices, GL_STATIC_DRAW);
}
void SkinnedModel::draw()
{
shaderProgram.use();
glEnableVertexAttribArray(modelLoc);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glVertexAttribPointer(modelLoc, 2, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray(jointIDLoc);
glBindBuffer(GL_ARRAY_BUFFER, indexBuffer);
glVertexAttribPointer(jointIDLoc, 1, GL_UNSIGNED_INT, GL_FALSE, 0, NULL);
glUniformMatrix4fv(jointsLoc, 10, GL_FALSE, (GLfloat*)&poseMatrices);
glDrawArrays(GL_TRIANGLES, 0, numVertices);
glDisableVertexAttribArray(modelLoc);
glDisableVertexAttribArray(jointIDLoc);
}
void SkinnedModel::rotate(Joint* joint, F32 angle, F32 x, F32 y)
{
F32 rcos = cos(angle);
F32 rsin = sin(angle);
Matrix<GLfloat, 4, 4> rotMatrix = Matrix<GLfloat, 4, 4>::Identity();
rotMatrix(0,0) = rcos;
rotMatrix(0,1) = -rsin;
rotMatrix(1,0) = rsin;
rotMatrix(1,1) = rcos;
rotMatrix(0,3) = x-rcos*x+rsin*y;
rotMatrix(1,3) = y-rsin*x-rcos*y;
poseMatrices[joint->index] *= rotMatrix;
}
Game.cpp
void Game::init()
{
GUI::init();
SkinnedModel::init();
getScreen()->setBackgroundColor(1.0f, 1.0f, 1.0f);
const GLfloat vertices[] =
{
// upper arm
0.0f, 0.0f,
0.4f, 0.0f,
0.4f, 0.2f,
0.0f, 0.0f,
0.4f, 0.2f,
0.0f, 0.2f,
// lower arm
0.4f, 0.0f,
0.8f, 0.0f,
0.8f, 0.2f,
0.4f, 0.0f,
0.8f, 0.2f,
0.4f, 0.2f
};
const GLuint indices[] =
{
// upper arm
0,
1,
1,
0,
1,
0,
// lower arm
1,
1,
1,
1,
1,
1
};
upperArm.parent = 0;
upperArm.children = new Joint*[1];
upperArm.children[0] = &lowerArm;
upperArm.index = 0;
lowerArm.parent = &upperArm;
lowerArm.children = 0;
lowerArm.index = 1;
m.create(vertices, indices, 12, &upperArm);
//m.rotate(&lowerArm, PI/4, 0.4f, 0.1f);
//DEBUG("SIZE %i", sizeof(Eigen::Matrix<GLfloat,4,4>));
}
void Game::loop(double dt)
{
m.draw();
}
Update: Apparently, even if all the boneID values are set to 1, the shader never uses 1 either. So the second array isn't being read at all, or it's not being read correctly.
You need to use glVertexAttribIPointer if you are using integer vertex attributes (i.e. something you have declared as in uint, in int, etc. in your vertex shader).
replace
glVertexAttribPointer(jointIDLoc, 1, GL_UNSIGNED_INT, GL_FALSE, 0, NULL);
with
glVertexAttribIPointer(jointIDLoc, 1, GL_UNSIGNED_INT, 0, NULL);
(note that glVertexAttribIPointer doesn't take the normalized parameter)
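Applied to the SkinnedModel::draw() from the question, the attribute setup becomes:
void SkinnedModel::draw()
{
    shaderProgram.use();
    glEnableVertexAttribArray(modelLoc);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexAttribPointer(modelLoc, 2, GL_FLOAT, GL_FALSE, 0, NULL);
    glEnableVertexAttribArray(jointIDLoc);
    glBindBuffer(GL_ARRAY_BUFFER, indexBuffer);
    // Integer attribute: the I variant keeps the values as integers
    // instead of converting them to floats.
    glVertexAttribIPointer(jointIDLoc, 1, GL_UNSIGNED_INT, 0, NULL);
    glUniformMatrix4fv(jointsLoc, 10, GL_FALSE, (GLfloat*)&poseMatrices);
    glDrawArrays(GL_TRIANGLES, 0, numVertices);
    glDisableVertexAttribArray(modelLoc);
    glDisableVertexAttribArray(jointIDLoc);
}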