Assistance debugging an OpenGL GLSL shader or the C++ code using it

I am working on adding a Phong shader to my working program. After I implemented the new shaders, my code gets a "Segmentation Fault: 11" during:
glDrawArrays(GL_TRIANGLES, 0, mCubes.getArrayNumberOfElements());
I know the number of elements is correct because it worked for my previous, simple shader.
Here is my Vertex Shader:
// vertex shader
attribute vec4 vPosition;
attribute vec3 vNormal;
varying vec4 color; //vertex shader
// light and material properties
uniform vec4 AmbientProduct, DiffuseProduct, SpecularProduct;
uniform mat4 ModelView;
//uniform mat4 Projection;
uniform vec4 LightPosition;
uniform float Shininess;
vec3 L, H, N, pos, E;
vec4 diffuse, specular, ambient;
float Kd, Ks;
void main()
{
// Transform vertex position into eye coordinates
pos = (ModelView * vPosition).xyz;
L = normalize( LightPosition.xyz - pos );
E = normalize( -pos );
H = normalize( L + E );
// Transform vertex normal into eye coordinates
N = normalize( ModelView*vec4(vNormal, 0.0) ).xyz;
// Compute terms in the illumination equation
ambient = AmbientProduct;
Kd = max( dot(L, N), 0.0 );
diffuse = Kd*DiffuseProduct;
Ks = pow( max(dot(N, H), 0.0), Shininess );
specular = Ks * SpecularProduct;
if( dot(L, N) < 0.0 )
specular = vec4(0.0, 0.0, 0.0, 1.0);
gl_Position = ModelView * vPosition;
color = ambient + diffuse + specular;
color.a = 1.0;
}
Here is my display function, in which the fault occurs:
void display(void) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
vector<float> cell = mCubes.getCell();
mat4 matrix = rot * scale(1.0/cell[0], 1.0/cell[1], 1.0/cell[2]) * translate(-cell[0]/2.0, -cell[1]/2.0, -cell[2]/2.0);
glUniformMatrix4fv(vShaderModelView, 1, GL_TRUE, matrix);
glDrawArrays(GL_TRIANGLES, 0, mCubes.getArrayNumberOfElements());
glutSwapBuffers();
glFlush();
}
And here is my init function that mostly sets up and interacts with the shaders:
void init() {
// Create a vertex array object
GLuint vao;
#ifdef __APPLE__
glGenVertexArraysAPPLE( 1, &vao );
glBindVertexArrayAPPLE( vao );
#else
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
#endif
// Create and initialize a buffer object
GLuint buffer;
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData( GL_ARRAY_BUFFER,
mCubes.getDisplayArraySize() + mCubes.getDisplayArraySize()*3, NULL, GL_STATIC_DRAW );
GLintptr offset = 0;
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getDisplayArraySize(), mCubes.getDisplayArray());
offset+= mCubes.getDisplayArraySize();
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getDisplayArraySize(), mCubes.getNormalVector());
// Load shaders and use the resulting shader program
string evname = "PROTCAD3DIR";
string path = PCGeneralIO::getEnvironmentVariable(evname);
path += "/data/shaders/";
#ifdef __APPLE__
string vshadername = path + "kw_vshader1_mac.glsl";
string fshadername = path + "kw_fshader1_mac.glsl";
//#else
// string vshadername = path + "kw_vshader1.glsl";
// string fshadername = path + "kw_fshader1.glsl";
#endif
GLuint program = InitShader( vshadername.c_str(), fshadername.c_str() );
glUseProgram(program);
// Initialize the vertex position attribute from the vertex shader
GLuint vShaderPosition = glGetAttribLocation(program, "vPosition");
glEnableVertexAttribArray(vShaderPosition);
glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
GLuint vShaderNormal = glGetAttribLocation(program, "vNormal");
glEnableVertexAttribArray(vShaderNormal);
//glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset)); //this was the ORIGINAL PROBLEM, now commented out and below is solution
glVertexAttribPointer(vShaderNormal, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
vShaderModelView = glGetUniformLocation(program, "ModelView");
vShaderLightPosition = glGetUniformLocation(program, "LightPosition");
vShaderAmbientProduct = glGetUniformLocation(program, "AmbientProduct");
vShaderDiffuseProduct = glGetUniformLocation(program, "DiffuseProduct");
vShaderSpecularProduct = glGetUniformLocation(program, "SpecularProduct");
vShaderShininess = glGetUniformLocation(program, "SpecularProduct");
glEnable( GL_DEPTH_TEST );
vec4 light = vec4(0.5,1.5,1.0,0.0);
glUniform4fv(vShaderLightPosition, 1, light);
vec4 amb = vec4(1.0f,0.0f,0.20f,1.0f);
glUniform4fv(vShaderAmbientProduct, 1, amb);
vec4 diff = vec4(0.5f,0.5f,0.5f,1.0f);
glUniform4fv(vShaderDiffuseProduct, 1, diff);
vec4 spec = vec4(0.80f,0.80f,0.80f,1.0f);
glUniform4fv(vShaderSpecularProduct, 1, spec);
float shin = 6.0f;
glUniform1f(vShaderShininess,shin);
glClearColor(.2, .2, .2, 1); /* Grey background */
}
If you have any question, feel free to ask and I will elaborate. I feel that either the vertex shader itself has a problem, or the way I interact with the shader is doing something wonky. Any help or suggestions are accepted!
EDIT (code above edited to reflect the solution): The problem was in the second:
glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
which should have read:
glVertexAttribPointer(vShaderNormal, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
It was a simple copy/paste mistake. However, the finished product still does not look correct:
![At rotation 0 it seems to be fully colored](http://i.stack.imgur.com/CKJ3f.png)
![Rotating a little reveals some odd behavior](http://i.stack.imgur.com/kyRfI.png)
![Even more rotation leads you to pull your hair out](http://i.stack.imgur.com/lYOzK.png)
![Then it whites out and you know I screwed up](http://i.stack.imgur.com/FZcqF.png)
So, as you rotate, the colors get screwed up, turning white, black, patterned and everything in between, which is obviously incorrect.
Edit: This is my attempt to correct the issue of passing the wrong number of values for vNormal:
void init() {
// Create a vertex array object
GLuint vao;
#ifdef __APPLE__
glGenVertexArraysAPPLE( 1, &vao );
glBindVertexArrayAPPLE( vao );
#else
glGenVertexArrays( 1, &vao );
glBindVertexArray( vao );
#endif
// Create and initialize a buffer object
GLuint buffer;
realVec *normArray = new realVec[mCubes.getNormalArraySize()];//vec4 array compared to vec3 array
normArray = mCubes.getNormalVector(); // new array of normals
for(int i=0; i<mCubes.getArrayNumberOfElements();i++){
printf("Normal at %d is %f \n",i,normArray[i][0]); //to print normals
printf("Normal at %d is %f \n",i,normArray[i][1]); //to print normals
printf("Normal at %d is %f \n",i,normArray[i][2]); //to print normals
}
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData( GL_ARRAY_BUFFER,
mCubes.getDisplayArraySize() + mCubes.getNormalArraySize(), NULL, GL_STATIC_DRAW ); //Changed size for vec3 array of normals
GLintptr offset = 0;
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getDisplayArraySize(), mCubes.getDisplayArray());
offset+= mCubes.getDisplayArraySize();
glBufferSubData(GL_ARRAY_BUFFER, offset, mCubes.getNormalArraySize(), normArray);
// Load shaders and use the resulting shader program
string evname = "PROTCAD3DIR";
string path = PCGeneralIO::getEnvironmentVariable(evname);
path += "/data/shaders/";
#ifdef __APPLE__
string vshadername = path + "kw_vshader1_mac.glsl";
string fshadername = path + "kw_fshader1_mac.glsl";
//#else
// string vshadername = path + "kw_vshader1.glsl";
// string fshadername = path + "kw_fshader1.glsl";
#endif
GLuint program = InitShader( vshadername.c_str(), fshadername.c_str() );
glUseProgram(program);
//offset =0;
// Initialize the vertex position attribute from the vertex shader
GLuint vShaderPosition = glGetAttribLocation(program, "vPosition");
glEnableVertexAttribArray(vShaderPosition);
glVertexAttribPointer(vShaderPosition, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(0));
GLuint vShaderNormal = glGetAttribLocation(program, "vNormal");
glEnableVertexAttribArray(vShaderNormal);
glVertexAttribPointer(vShaderNormal, 3, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
//vShaderMatrix = glGetUniformLocation(program, "vMatrix");
//vShaderColor = glGetUniformLocation(program, "vColor")
vShaderModelView = glGetUniformLocation(program, "ModelView");
vShaderLightPosition = glGetUniformLocation(program, "LightPosition");
//vShaderProjection = glGetUniformLocation(program, "Projection");
vShaderAmbientProduct = glGetUniformLocation(program, "AmbientProduct");
vShaderDiffuseProduct = glGetUniformLocation(program, "DiffuseProduct");
vShaderSpecularProduct = glGetUniformLocation(program, "SpecularProduct");
vShaderShininess = glGetUniformLocation(program, "SpecularProduct");
glEnable( GL_DEPTH_TEST );
vec4 light = vec4(0.5,1.5,1.0,0.0);
glUniform4fv(vShaderLightPosition, 1, light);
vec4 amb = vec4(1.0f,0.0f,0.20f,1.0f);
glUniform4fv(vShaderAmbientProduct, 1, amb);
vec4 diff = vec4(0.5f,0.5f,0.5f,1.0f);
glUniform4fv(vShaderDiffuseProduct, 1, diff);
vec4 spec = vec4(0.80f,0.80f,0.80f,1.0f);
glUniform4fv(vShaderSpecularProduct, 1, spec);
float shin = 6.0f;
glUniform1f(vShaderShininess,shin);
glClearColor(.2, .2, .2, 1); /* Grey background */
}
Should I maybe change the light, ambient, specular, and diffuse properties? I am not sure what the problem is.

You pass your vNormal attribute data with the following call:
glVertexAttribPointer(vShaderNormal, 4, GL_FLOAT, GL_FALSE, 0, BUFFER_OFFSET(offset));
This tells OpenGL that each normal has 4 components, whereas in your vertex shader you declare it as
attribute vec3 vNormal;
This mismatch may well be the cause of your problem, since the normals end up misinterpreted.

Related

Fragment shader color error with multiple buffers

I've been trying OpenGL recently and am stuck on an issue again.
If I set colors via uniforms, I can draw multiple vertex arrays with any color of my choice. But passing two buffers to a vertex array object, where attribute 0 is the vertex position and attribute 1 is the color, results in weird coloration.
My main function :
int main(){
Window window(960,540);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
Reader read1("src/shaders/test.vert");
Reader read2("src/shaders/test.frag");
char * r1 = read1.getData();
char * r2 = read2.getData();
GLfloat vert[] = {
0, 0, 0,
0, 3, 0,
8, 3, 0,
8, 0, 0
};
GLushort indices[] = {
0,1,2,
2,3,0
};
GLfloat colors[] = {
1, 0, 1, 1,
1, 0, 1, 1,
1, 0, 1, 1,
1, 0, 1, 1,
};
VertexArray vao;
Buffer* vbo = new Buffer(vert, 4 * 4, 3);
vao.addBuffer(vbo, 0);
vao.addBuffer(new Buffer(colors,4 * 4 , 4), 1);
indexBuffer ibo(indices, 6);
Shader shader(r1, r2);
shader.enable();
shader.setUniformMat4("pr_matrix", mat4::orthographic(0.0f, 16.0f, 0.0f, 9.0f, -1.0f, 1.0f));
shader.setUniformMat4("ml_matrix", mat4::translation(vec3(4, 3, 0)));
shader.setUniform2f("light_pos", vec2(8.0f, 4.5f));
shader.setUniform4f("colour", vec4(0.2, 0.3, 0.8, 1));
while (!window.closed()){
window.clear();
double x, y;
x = window.getX();
y = window.getY();
shader.setUniform2f("light_pos", vec2((float)((x)*16.0f / 960.0f), (float)(9 - 9 * (y) / 540.0f)));
vao.bind();
ibo.bind();
shader.setUniform4f("colour", vec4(0.2, 0.3, 0.8, 1));
shader.setUniformMat4("ml_matrix", mat4::translation(vec3(4, 3, 0)));
glDrawElements(GL_TRIANGLES, ibo.getCount(), GL_UNSIGNED_SHORT, 0);
ibo.unbind();
vao.unbind();
window.update();
}
return 0;
}
My vertex shaders :
#version 410 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec4 color;
uniform mat4 pr_matrix ;
uniform mat4 vw_matrix = mat4(1.0f);
uniform mat4 ml_matrix = mat4(1.0f);
out DATA{
vec4 position;
vec4 color;
} vs_out;
out vec4 pos;
void main(){
gl_Position = pr_matrix * vw_matrix * ml_matrix * vec4(position,1) ;
vs_out.position = ml_matrix * vec4(position,1);
vs_out.color = color;
}
My fragment shaders :
#version 410 core
layout(location = 0) out vec4 color ;
uniform vec4 colour;
uniform vec2 light_pos;
in DATA{
vec4 position;
vec4 color;
} fs_in;
void main(){
float intensity = 1.0f / length(fs_in.position.xy - light_pos);
//color = fs_in.color * intensity;
color = fs_in.color * intensity;
}
My Buffer class, in case it needs to be corrected:
Buffer::Buffer(GLfloat *data, GLsizei count, GLuint compCountExt) : compCount (compCountExt) {
glGenBuffers(1, &bufferId);
glBindBuffer(GL_ARRAY_BUFFER,bufferId);
glBufferData(GL_ARRAY_BUFFER, count* sizeof(GLfloat), data, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void Buffer::bind() const {
glBindBuffer(GL_ARRAY_BUFFER, bufferId);
}
void Buffer::unbind() const {
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
EDIT:
Code of the VertexArray class:
VertexArray::VertexArray(){
glGenVertexArrays(1,&arrayID);
}
void VertexArray::bind() const{
glBindVertexArray(arrayID);
}
void VertexArray::unbind() const{
glBindVertexArray(0);
}
VertexArray::~VertexArray(){
}
void VertexArray::addBuffer(Buffer* buffer, GLuint index){
bind();
glBindBuffer(GL_ARRAY_BUFFER, arrayID);
glEnableVertexAttribArray(index);
glVertexAttribPointer(index, buffer->getComCount(), GL_FLOAT, GL_FALSE, 0, 0);
buffer->unbind();
unbind();
}
There are calls to glVertexAttribPointer in this class.
glVertexAttribPointer refers to the currently bound array buffer. This means you have to bind the array buffer before you use glVertexAttribPointer:
void VertexArray::addBuffer(Buffer* buffer, GLuint index){
bind();
// glBindBuffer(GL_ARRAY_BUFFER, arrayID); <---- skip
buffer->bind(); // <---- bind the array buffer
glEnableVertexAttribArray(index);
glVertexAttribPointer(index, buffer->getComCount(), GL_FLOAT, GL_FALSE, 0, 0);
buffer->unbind();
unbind();
}
See OpenGL 4.6 Specification - 10.3.9 Vertex Arrays in Buffer Objects:
A buffer object binding point is added to the client state associated with each vertex array index. The commands that specify the locations and organizations of vertex arrays copy the buffer object name that is bound to ARRAY_BUFFER to the binding point corresponding to the vertex array index being specified. For example, the VertexAttribPointer command copies the value of ARRAY_BUFFER_BINDING.

OpenGL mesh drawing incorrectly

Is there a reason why my mesh isn't drawing correctly? I loaded an .OBJ model from a file and tried to draw it, but it always comes out deformed. Any help would be appreciated.
Here is my code:
void Renderer::draw_model(const std::vector<Vector3>& vertex_array, const std::vector<unsigned int>& element_array, const std::vector<Vector2>& uv_array, const std::vector<Vector3>& normal_array, double x, double y, double z, double rx, double ry, double rz, double sx, double sy, double sz, const std::vector<Texture *>& texture_array, double red, double green, double blue, double alpha)
{
#ifdef DOKUN_OPENGL // OpenGL is defined
if(get_current_rendering_API() != "OpenGL") // but it is not set as the current rendering API
return;
#ifdef __windows__
if(!wglGetCurrentContext())
{
Logger("Rendering Failed : No OpenGL Context found");
return;
}
#endif
#ifdef __gnu_linux__
#ifdef DOKUN_X11
if(!glXGetCurrentContext())
{
Logger("Rendering failed : No OpenGL Context found");
return;
}
#endif
#endif
if(vertex_array.empty())
{
Logger("Rendering failed : Vertex list is empty!");
return;
}
if(element_array.empty())
{
Logger("Rendering failed : Element list is empty!");
return;
}
const GLchar * vertex_source[] =
{
"#version 400\n"
"layout(location = 0) in vec3 position;\n"
"layout(location = 1) in vec2 tex_coord;\n"
"layout(location = 2) in vec3 normal;\n"
" \n"
" \n"
" \n"
"uniform mat4 proj; // zooming \n"
"uniform mat4 view; // camera pos \n"
"uniform mat4 model; // object \n"
" \n"
" \n"
" \n"
" \n"
"out vec3 Normal ;\n"
"out vec2 Texcoord;\n"
"\n"
"out vec3 frag_position;\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"\n"
"void main(void) \n"
"{ \n"
"//frag_position = vec3(model * vec4(position, 1));\n"
"Texcoord = tex_coord;\n"
"Normal = mat3(transpose(inverse(model))) * normal;//normal;\n"
"gl_Position = proj * view * model * vec4(position, 1.0);\n"
"} \n"
};
const GLchar * fragment_source[] = // holds all color
{
"#version 400 \n"
" \n"
"out vec4 out_color; \n"
"uniform vec4 color;\n"
"\n"
"\n"
"in vec3 Normal ;\n"
"in vec2 Texcoord;\n"
"\n"
"in vec3 frag_position;\n"
"\n"
"\n"
"\n"
"uniform vec3 light_color ;\n"
"uniform vec3 light_position;\n"
"\n"
"\n"
"\n"
"void main(void) \n"
"{ \n"
"\n"
"vec3 light_direction = normalize(light_position - frag_position);\n"
"vec3 ambient = 0.1f * light_color;\n"
"vec3 diffuse = max(dot(normalize(Normal), light_direction), 0.0) * light_color;\n"
"\n"
"\n"
"out_color = vec4((ambient + diffuse) * color.xyz, 1.0); \n"
"} \n"
};
glEnable(GL_DEPTH_TEST); // enable depth
glEnable(GL_CULL_FACE );
// Set mode
//glPolygonMode(GL_FRONT_AND_BACK, GL_LINE); // GL_POINT, GL_LINE, GL_FILL,
//--------------
// shaders
Shader vertex_shader ;
Shader fragment_shader;
vertex_shader.create(DOKUN_SHADER_VERTEX);
vertex_shader.set_source(vertex_source);
vertex_shader.compile();
fragment_shader.create(DOKUN_SHADER_FRAGMENT);
fragment_shader.set_source(fragment_source);
fragment_shader.compile();
// program
Program program;
program.create();
program.attach(vertex_shader );
program.attach(fragment_shader);
program.link();
// delete shaders after linking them to the program
vertex_shader.destroy ();
fragment_shader.destroy();
// use program
program.use ();
//---------------
// set uniforms
////////////////////////
// light
if(program.get_uniform("light_color" ) != -1) program.set_uniform("light_color", 1.0f, 1.0f, 1.0f);//, (alpha / 255));
if(program.get_uniform("light_position" ) != -1) program.set_uniform("light_position", 1,1,1);//static_cast<float>(x), static_cast<float>(y), static_cast<float>(z));//, (alpha / 255));
////////////////////////
// camera
glm::vec3 eye = glm::vec3(camera->get_position().x, camera->get_position().y, camera->get_position().z);
glm::vec3 center = glm::vec3(camera->get_position().x + camera->get_view().x, camera->get_position().y + camera->get_view().y, camera->get_position().z + camera->get_view().z);
glm::vec3 up = glm::vec3(camera->get_up().x , camera->get_up().y , camera->get_up().z);
////////////////////////
#ifdef use_glm
glm::mat4 model;
model = glm::scale(model, glm::vec3(static_cast<float>(sx),
static_cast<float>(sy),
static_cast<float>(sz)));
model = glm::rotate(model, static_cast<float>(rx), glm::vec3(1, 0, 0));
model = glm::rotate(model, static_cast<float>(ry), glm::vec3(0, 1, 0));
model = glm::rotate(model, static_cast<float>(rz), glm::vec3(0, 0, 1));
model = glm::translate(model, glm::vec3(static_cast<float>(x),
static_cast<float>(y),
static_cast<float>(z)));
glm::mat4 view = glm::lookAt(eye, center, up);
glm::mat4 proj = glm::perspective(67.5f, static_cast<float>(800 / 600), 1.0f, 1024.0f); // average fov = 67.5 45 + 90 = 135 / 2 = 67.5 | znear=1 zfar=1024
glm::mat4 modelview = model * view;
glUniformMatrix4fv(glGetUniformLocation((GLuint)program.get_id(), "model"), 1, false, glm::value_ptr(model) ); // object
glUniformMatrix4fv(glGetUniformLocation((GLuint)program.get_id(), "view"), 1, false, glm::value_ptr(view) ); // camera
glUniformMatrix4fv(glGetUniformLocation((GLuint)program.get_id(), "proj"), 1, false, glm::value_ptr(proj) ); // zoom
#endif
////////////////////////
if(program.get_uniform("color") != -1) program.set_uniform("color", (red / 255), (green / 255), (blue / 255), (alpha / 255));
//program.get_default()->set_uniform("base", static_cast<int>(0)); // bind to texture unit 0
////////////////////////
// texture data
/*
GLuint * texture = new GLuint[8]; // 256 would be ideal
for(unsigned int i = 0; i < texture_array.size(); i++) // for each texture
{
std::cout << "GENERATING TEXTURES...." << std::endl;
glGenTextures(8, texture); // generate 256 textures
glActiveTexture(GL_TEXTURE0 + i);
glBindTexture( GL_TEXTURE_2D, (GLuint)texture[i] );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameterf( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, texture_array[i]->get_width(), texture_array[i]->get_height(), 0, GL_RGBA,
GL_UNSIGNED_BYTE, texture_array[i]->get_data() );
glGenerateMipmap(GL_TEXTURE_2D);
}
*/
///////////////////////
///////////////////////
// vertex array obj
GLuint vertex_array_obj;
glGenVertexArrays(1, &vertex_array_obj);
// vertex buffer obj - for drawing
glBindVertexArray(vertex_array_obj); // bind vertex array obj to vertex attrib ptr
GLuint vertex_buffer_obj;
glGenBuffers(1, & vertex_buffer_obj);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer_obj);
glBufferData(GL_ARRAY_BUFFER, vertex_array.size() , &vertex_array[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_DOUBLE, GL_FALSE, 0, static_cast<void*>(0));
glBindVertexArray(0); // unbind
// tex_coord buffer obj - for texture mapping
GLuint uv_buffer_obj;
if(!uv_array.empty())
{
glBindVertexArray(vertex_array_obj); // bind
glGenBuffers(1, & uv_buffer_obj);
glBindBuffer(GL_ARRAY_BUFFER, uv_buffer_obj);
glBufferData(GL_ARRAY_BUFFER, uv_array.size() , &uv_array[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 2, GL_DOUBLE, GL_FALSE, 0, static_cast<void*>(0));
glBindVertexArray(0); // unbind
}
// normal buffer - for lighting
GLuint normal_buffer_obj;
if(!normal_array.empty())
{
glBindVertexArray(vertex_array_obj); // bind
glGenBuffers(1, & normal_buffer_obj);
glBindBuffer(GL_ARRAY_BUFFER, normal_buffer_obj);
glBufferData(GL_ARRAY_BUFFER, normal_array.size(), &normal_array[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(2);
glVertexAttribPointer(2, 3, GL_DOUBLE, GL_FALSE, 0, static_cast<void*>(0));
glBindVertexArray(0); // unbind
}
// element buffer - specifies order in which vertices are to be drawn
glBindVertexArray(vertex_array_obj); // bind
GLuint index_buffer_obj;
glGenBuffers(1, & index_buffer_obj);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffer_obj);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, element_array.size(), &element_array[0], GL_STATIC_DRAW);
glBindVertexArray(0);
// Draw the model !
glBindVertexArray(vertex_array_obj); // bind
glDrawElements(GL_TRIANGLES, element_array.size(), GL_UNSIGNED_INT, 0);//glDrawElements(GL_TRIANGLES, element_array.size(), GL_UNSIGNED_INT, &element_array[0]); // elements - order in which vertices are to be drawn
glBindVertexArray(0); // unbind
////////////////////////
// cleanup
// textures
//if(texture_array.size() > 0)
// glDeleteTextures(8, texture);
// attributes
glDisableVertexAttribArray(0); // drawing
glDisableVertexAttribArray(1); // texture mapping
glDisableVertexAttribArray(2); // lighting
// buffers
glDeleteBuffers(1, & vertex_buffer_obj );
glDeleteBuffers(1, & uv_buffer_obj );
glDeleteBuffers(1, & normal_buffer_obj );
glDeleteBuffers(1, & index_buffer_obj );
// arrays
glDeleteVertexArrays(1, & vertex_array_obj);
//glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
// program
program.disable();
#endif
}
And this is the result of the above code:
The size parameter in all your glBufferData calls is wrong. This function expects the amount of data in bytes, but you supply the number of elements, for example in this line:
glBufferData(GL_ARRAY_BUFFER, vertex_array.size() , &vertex_array[0], GL_STATIC_DRAW);
What you actually need is something like this:
glBufferData(GL_ARRAY_BUFFER, vertex_array.size() * sizeof(Vector3), &vertex_array[0], GL_STATIC_DRAW);
(assuming that Vector3 contains exactly 3 doubles).
As a side note: It is rather uncommon to supply vertex information in double precision. So if there is no special reason for doing this, I would recommend using float instead.

Can't draw any other objects before or after drawing particles

I am working on a game and am trying to implement the instanced CPU particle system from http://www.opengl-tutorial.org/intermediate-tutorials/billboards-particles/particles-instancing/
I managed to get it working in my code structure, but when I try to draw other objects in the same window I can't. I have tested it, and it only lets me draw one thing: either the particle system or the object I want.
The problem happens specifically in this part of the code:
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
// Use our shader
glUseProgram(particleprogramID->programHandle);
unit2 +=1;
glActiveTexture(GL_TEXTURE0 + unit2);
glBindTexture(GL_TEXTURE_2D, texture);
glUniform1i(TextureID, unit2);
glm::mat4 ViewMatrix = camera->getViewMatrix();
// Same as the billboards tutorial
glUniform3f(CameraRight_worldspace_ID, ViewMatrix[0][0], ViewMatrix[1][0], ViewMatrix[2][0]);
glUniform3f(CameraUp_worldspace_ID , ViewMatrix[0][1], ViewMatrix[1][1], ViewMatrix[2][1]);
glUniformMatrix4fv(ViewProjMatrixID, 1, GL_FALSE, &mvp[0][0]);
//glUniformMatrix4fv(modviewprojID, 1, GL_FALSE, &mvp[0][0]);
//1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, billboard_vertex_buffer);
glVertexAttribPointer(
0,
3,
GL_FLOAT,
GL_FALSE,
0,
(void*)0
);
// 2nd attribute buffer : positions of particles' centers
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, particles_position_buffer);
glVertexAttribPointer(
1,
4,
GL_FLOAT,
GL_FALSE,
0,
(void*)0
);
// 3rd attribute buffer : particles' colors
glEnableVertexAttribArray(2);
glBindBuffer(GL_ARRAY_BUFFER, particles_color_buffer);
glVertexAttribPointer(
2,
4,
GL_UNSIGNED_BYTE,
GL_TRUE,
0,
(void*)0
);
glVertexAttribDivisor(0, 0);
glVertexAttribDivisor(1, 1);
glVertexAttribDivisor(2, 1);
glDrawArraysInstanced(GL_TRIANGLE_STRIP, 0, 4, ParticlesCount);
glDisableVertexAttribArray(0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(2);
Then I try to draw my star:
unit2 += 1;
starTexture->Bind(unit2);
shaderObject ->useShader();
glUniform1i(glGetUniformLocation(shaderObject->programHandle, "colorTexture"), unit2);
glUniformMatrix4fv(glGetUniformLocation(shaderObject->programHandle, "modelMatrix"), 1, GL_FALSE, glm::value_ptr(star1->getModelMatrix()));
glUniformMatrix4fv(glGetUniformLocation(shaderObject->programHandle, "projectionMatrix"), 1, GL_FALSE, glm::value_ptr(projectionViewMatrix));
star1->draw();
The vertex and fragment shaders for the particle system:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 squareVertices;
layout(location = 1) in vec4 xyzs; // Position of the center of the particule and size of the square
layout(location = 2) in vec4 color; // Position of the center of the particule and size of the square
// Output data ; will be interpolated for each fragment.
out vec2 UV;
out vec4 particlecolor;
// Values that stay constant for the whole mesh.
uniform vec3 CameraRight_worldspace;
uniform vec3 CameraUp_worldspace;
uniform mat4 VP; // Model-View-Projection matrix, but without the Model (the position is in BillboardPos; the orientation depends on the camera)
void main()
{
float particleSize = xyzs.w; // because we encoded it this way.
vec3 particleCenter_wordspace = xyzs.xyz;
vec3 vertexPosition_worldspace =
particleCenter_wordspace
+ CameraRight_worldspace * squareVertices.x * particleSize
+ CameraUp_worldspace * squareVertices.y * particleSize;
// Output position of the vertex
gl_Position = VP * vec4(vertexPosition_worldspace, 1.0f);
// UV of the vertex. No special space for this one.
UV = squareVertices.xy + vec2(0.5, 0.5);
particlecolor = color;
}
Fragment shader:
#version 330 core
// Interpolated values from the vertex shaders
in vec2 UV;
in vec4 particlecolor;
// Ouput data
out vec4 color;
uniform sampler2D myTexture;
void main(){
// Output color = color of the texture at the specified UV
color = texture2D( myTexture, UV ) * particlecolor;
}
And it only displays the particle system.
Worth mentioning:
The object I want to draw is a star modelled in Blender; it is displayed correctly when drawn alone or alongside objects other than the particle system, and it has its own class with buffers for positions, UVs, indices and normals.
It seems like the star data is being swallowed by the buffer.
I appreciate any help.

Lighting is misbehaving in shader program

Here is my shader program:
#version 330 core
// Input vertex data, different for all executions of this shader.
layout(location = 0) in vec3 vertexPosition_modelspace;
layout(location = 1) in vec3 vertexNormal_modelspace;
// Values that stay constant for the whole mesh.
uniform mat4 MVP;
uniform mat4 V;
uniform mat4 M;
uniform mat3 blNormalMatrix;
uniform vec3 lightPos;
out vec4 forFragColor;
const vec3 diffuseColor = vec3(0.55, 0.09, 0.09);
void main(){
// Output position of the vertex, in clip space : MVP * position
gl_Position = MVP * vec4(vertexPosition_modelspace,1);
vec3 MaterialAmbientColor = vec3(0.1,0.1,0.1) * diffuseColor;
// all following gemetric computations are performed in the
// camera coordinate system (aka eye coordinates)
vec3 vertexNormal_cameraspace = (V*M*vec4(vertexNormal_modelspace,0)).xyz;
vec4 vertexPosition_cameraspace4 = V*M* vec4(vertexPosition_modelspace,1);
vec3 vertexPosition_cameraspace = vec3(vertexPosition_cameraspace4).xyz;
vec3 lightDir = normalize(lightPos - vertexPosition_cameraspace);
float lambertian = clamp(dot(lightDir,vertexNormal_cameraspace), 0.0,1.0);
forFragColor = vec4(lambertian*diffuseColor , 1.0);
}
My problem is that this "worked" in the older OpenGL profile (it didn't even have a version number; I think it was around OpenGL 2.1 or so). The key change is that I originally had normal = gl_NormalMatrix * gl_Normal and things worked.
However, that was based on my professor's code, which I've since updated to the 3.3+ core profile, and after fixing the deprecated functions I am now left with this:
https://drive.google.com/file/d/0B6oLZ_d7S-U7cVpkUXpVXzdaZEk/edit?usp=sharing is a link to the video of my program's behavior.
The light source should be a point light at roughly (0,0,3) that shouldn't move, but it's not following any logical behavioral pattern and I can't make sense of it.
I tried passing the inverse transpose of the model matrix and using it as a replacement normal matrix, but that wrecked my normals. So I don't know.
This was my normalMatrix:
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelMatrix)));
Edit: Here is my Display code:
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_DEPTH_TEST);
// Use our shader
glUseProgram(programID);
// Get our transformations iff we move the camera around.
glm::mat4 MyModelMatrix = ModelMatrix * thisTran * ThisRot;
MVP = ProjectionMatrix * ViewMatrix * MyModelMatrix;
glm::mat4 ModelView = ViewMatrix * MyModelMatrix;
glm::mat3 MyNormalMatrix = glm::mat3(glm::transpose(glm::inverse(ModelView)));
glm::vec3 newLightPos = lightPos;
// Send our transformation to the currently bound shader,
// in the "MVP" uniform
glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]);
glUniformMatrix4fv(ModelMatrixID, 1, GL_FALSE, &MyModelMatrix[0][0]);
glUniformMatrix4fv(ViewMatrixID, 1, GL_FALSE, &ViewMatrix[0][0]);
glUniformMatrix4fv(BlNormalMatrix,1,GL_FALSE, &MyNormalMatrix[0][0]);
glUniformMatrix4fv(BlRotations, 1, GL_FALSE, &ThisRot[0][0]);
glUniform3f(BlCamera, cameraLoc.x, cameraLoc.y, cameraLoc.z);
glUniform3f(lPosition, newLightPos.x,newLightPos.y,newLightPos.z);
// VBO buffer: vertices
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// 2rd attribute buffer : normals
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glVertexAttribPointer(
1, // attribute
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// draw object using opengl 3.3 shit
glDrawArrays(GL_TRIANGLES, 0, vertices.size() );
The problem ultimately turned out to be an issue with the model loader provided by my professor: it was somehow incompatible with modern OpenGL and would only "mostly" work, in that it was clearly missing the left/right normals or gave them invalid values. I solved it by using Assimp instead.
The code, with Assimp linked, looks like this:
void blInitResWAssimp() {
cout << "blInitResWAssimp" << endl;
blCreateModelViewProjectionMatrix();
//loads object
bool res = loadAssImp("Resources/RCSS-subdiv.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
//bool res = loadAssImp("Resources/cheb.obj", indices, indexed_vertices, indexed_uvs, indexed_normals);
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// Load it into a VBO
glGenBuffers(1, &vertexbuffer);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_vertices.size() * sizeof(glm::vec3), &indexed_vertices[0], GL_STATIC_DRAW);
// Normal buffer
glGenBuffers(1, &normalbuffer);
glBindBuffer(GL_ARRAY_BUFFER, normalbuffer);
glBufferData(GL_ARRAY_BUFFER, indexed_normals.size() * sizeof(glm::vec3), &indexed_normals[0], GL_STATIC_DRAW);
// Generate a buffer for the indices as well
glGenBuffers(1, &elementbuffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elementbuffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(unsigned short), &indices[0], GL_STATIC_DRAW);
//ModelMatrix = ModelMatrix * glm::translate(glm::mat4(1.0f), glm::vec3(-0.5, -0.5, 0));
}
Assimp stuff
bool loadAssImp(
const char * path,
std::vector<unsigned short> & indices,
std::vector<glm::vec3> & vertices,
std::vector<glm::vec2> & uvs,
std::vector<glm::vec3> & normals
){
Assimp::Importer importer;
const aiScene* scene = importer.ReadFile(path, 0/*aiProcess_JoinIdenticalVertices | aiProcess_SortByPType*/);
if (!scene) {
fprintf(stderr, "%s", importer.GetErrorString());
getchar();
return false;
}
const aiMesh* mesh = scene->mMeshes[0]; // In this simple example code we always use the 1rst mesh (in OBJ files there is often only one anyway)
const aiMaterial* material = scene->mMaterials[0];
// Fill vertices positions
vertices.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D pos = mesh->mVertices[i];
vertices.push_back(glm::vec3(pos.x, pos.y, pos.z));
}
// Fill vertices texture coordinates
/*
uvs.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D UVW = mesh->mTextureCoords[0][i]; // Assume only 1 set of UV coords; AssImp supports 8 UV sets.
uvs.push_back(glm::vec2(UVW.x, UVW.y));
}*/
// Fill vertices normals
normals.reserve(mesh->mNumVertices);
for (unsigned int i = 0; i<mesh->mNumVertices; i++){
aiVector3D n = mesh->mNormals[i];
//aiVector3D n = mesh->mVertices[i];
normals.push_back(glm::vec3(n.x, n.y, n.z));
}
// Fill face indices
indices.reserve(3 * mesh->mNumFaces);
for (unsigned int i = 0; i<mesh->mNumFaces; i++){
// Assume the model has only triangles.
indices.push_back(mesh->mFaces[i].mIndices[0]);
indices.push_back(mesh->mFaces[i].mIndices[1]);
indices.push_back(mesh->mFaces[i].mIndices[2]);
}
// The "scene" pointer will be deleted automatically by "importer"
return true;
}

OpenGL 4.3 gl_VertexID not incrementing with glDrawArrays

I'm having difficulty understanding why gl_VertexID is not incrementing properly for each new vertex in the code below, which renders "debug text". Hints/tips?
(Original code is referenced at the bottom of this post)
Hereafter is the vertex shader:
#version 430 core
layout( location = 0 ) in int Character;
out int vCharacter;
out int vPosition;
void main()
{
vPosition = gl_VertexID;
vCharacter = Character;
gl_Position = vec4(0, 0, 0, 1);
}
The geometry shader:
#version 430 core
layout(points) in;
layout(triangle_strip, max_vertices = 4) out;
in int vCharacter[1];
in int vPosition[1];
out vec2 gTexCoord;
uniform sampler2D Sampler;
uniform vec2 CellSize;
uniform vec2 CellOffset;
uniform vec2 RenderSize;
uniform vec2 RenderOrigin;
void main()
{
// Determine the final quad's position and size:
float x = RenderOrigin.x + float(vPosition[0]) * RenderSize.x * 2.0f;
float y = RenderOrigin.y;
vec4 P = vec4(x, y, 0, 1);
vec4 U = vec4(1, 0, 0, 0) * RenderSize.x;
vec4 V = vec4(0, 1, 0, 0) * RenderSize.y;
// Determine the texture coordinates:
int letter = vCharacter[0];
letter = clamp(letter - 32, 0, 96);
int row = letter / 16 + 1;
int col = letter % 16;
float S0 = CellOffset.x + CellSize.x * col;
float T0 = CellOffset.y + 1 - CellSize.y * row;
float S1 = S0 + CellSize.x - CellOffset.x;
float T1 = T0 + CellSize.y;
// Output the quad's vertices:
gTexCoord = vec2(S0, T1); gl_Position = P - U - V; EmitVertex();
gTexCoord = vec2(S1, T1); gl_Position = P + U - V; EmitVertex();
gTexCoord = vec2(S0, T0); gl_Position = P - U + V; EmitVertex();
gTexCoord = vec2(S1, T0); gl_Position = P + U + V; EmitVertex();
EndPrimitive();
}
The draw call and other relevant code:
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
GLuint attribLocation = glGetAttribLocation(m_ProgramTextPrinter, "Character");
glVertexAttribIPointer(attribLocation, 1, GL_UNSIGNED_BYTE, 1, text.data()->c_str());
glEnableVertexAttribArray(attribLocation);
glDrawArrays(GL_POINTS, 0, text.data()->size());
Basically this code will be used for text rendering. When I run it, the letters are drawn on top of each other. When I modify
glVertexAttribIPointer(attribLocation, 1, GL_UNSIGNED_BYTE, 1, text.data()->c_str());
into
glVertexAttribIPointer(attribLocation, 1, GL_UNSIGNED_BYTE, 2, text.data()->c_str());
I notice there is a shift in the x-direction, as expected from the geometry shader; nevertheless, the letters are still drawn on top of each other.
I'm using an NVIDIA Geforce GT 630M, driver version: 320.18 and an OpenGL 4.3 context.
Reference to the original author's code
I got the code working by using VBOs as Bartek hinted at: I basically replaced
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
GLuint attribLocation = glGetAttribLocation(m_ProgramTextPrinter, "Character");
glVertexAttribIPointer(attribLocation, 1, GL_UNSIGNED_BYTE, 1, text.data()->c_str());
glEnableVertexAttribArray(attribLocation);
glDrawArrays(GL_POINTS, 0, text.data()->size());
with
GLuint vaoID, bufferID;
glGenVertexArrays(1, &vaoID);
glBindVertexArray(vaoID);
glGenBuffers(1, &bufferID);
glBindBuffer(GL_ARRAY_BUFFER, bufferID);
glBufferData(GL_ARRAY_BUFFER, text.data()->size() * sizeof(GL_UNSIGNED_BYTE), text.data()->data(), GL_DYNAMIC_DRAW);
GLuint attribLocation = glGetAttribLocation(m_ProgramTextPrinter, "Character");
glVertexAttribIPointer(attribLocation, 1, GL_UNSIGNED_BYTE, 0, 0);
glEnableVertexAttribArray(attribLocation);
glDrawArrays(GL_POINTS, 0, text.data()->size());
glDeleteVertexArrays(1, &vaoID);
glDeleteBuffers(1, &bufferID);