Use of VAO+IBO with GLSL Shaders - C++

I'm attempting to use VAOs + VBOs + IBOs with shaders, but no object gets drawn. I am not sure what I am missing. I am pretty new to C++ and GLSL, so I am not sure whether I am screwing something up with the C++ in general, or whether I am failing to handle the OpenGL context correctly.
The main function (the window-creation code is omitted; if you think you need to review it as well, just let me know):
int main(int argc, char *argv[])
{
//INIT SDL
SDL_Init(SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(400, 300, SDL_WINDOW_OPENGL, &displayWindow, &displayRenderer);
SDL_GetRendererInfo(displayRenderer, &displayRendererInfo);
/*TODO: Check that we have OpenGL */
if ((displayRendererInfo.flags & SDL_RENDERER_ACCELERATED) == 0 || (displayRendererInfo.flags & SDL_RENDERER_TARGETTEXTURE) == 0) {}
SDL_GL_CreateContext(displayWindow);
//SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
glewInit();
int error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glewInit call: " << error << "\n"; };
//glEnable(GL_DEBUG_OUTPUT);
Display_InitGL();
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during Display init: " << error << "\n"; };
Display_SetViewport(400, 300);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during Display Set Viewport Issue: " << error << "\n"; };
// SET UP TEST OBJ
MainChar *player = new MainChar();
player->MainChar_VBO_Func();
GLushort size = player->MainChar_VBO_IndexBuffer_Func();
float count = 0.0;
// END SET UP OF TEST OBJ
GLint *length = new GLint;
const char* vertShdr = readFile("C:\\Users\\JRFerrell\\Documents\\Visual Studio 2013\\Projects\\GLEW Practice\\vertShader.vs", *length);
std::cout << vertShdr;
GLuint vertShaderId = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertShaderId, 1, &vertShdr, length);
std::cout << "\n\nLength: " << *length;
glCompileShader(vertShaderId);
GLint *length2 = new GLint;
const char* fragShdr = readFile("C:\\Users\\JRFerrell\\Documents\\Visual Studio 2013\\Projects\\GLEW Practice\\fragShader.fs", *length2);
GLint fragShaderId = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragShaderId, 1, &fragShdr, length2);
glCompileShader(fragShaderId);
GLuint shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertShaderId);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glAttachShader: " << error << "\n"; };
glAttachShader(shaderProgram, fragShaderId);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glAttachShader: " << error << "\n"; };
glBindAttribLocation(shaderProgram, 0, "in_Position");
glBindAttribLocation(shaderProgram, 1, "in_Normal");
glLinkProgram(shaderProgram);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glLinkProgram: " << error << "\n"; };
// END SHADER PROGRAM DEFINITION
//Check info log for errors:
int Len = 0;
char *Buffer = nullptr;
glGetShaderiv(vertShaderId, GL_INFO_LOG_LENGTH, &Len);
Buffer = new char[Len];
glGetShaderInfoLog(vertShaderId, Len, &Len, Buffer);
std::cout << "Vertex Log:" << std::endl << Buffer << std::endl;
delete[] Buffer;
glGetShaderiv(fragShaderId, GL_INFO_LOG_LENGTH, &Len);
Buffer = new char[Len];
glGetShaderInfoLog(fragShaderId, Len, &Len, Buffer);
std::cout << "Fragment Log:" << std::endl << Buffer << std::endl;
delete[] Buffer;
glGetProgramiv(shaderProgram, GL_INFO_LOG_LENGTH, &Len);
Buffer = new char[Len];
glGetProgramInfoLog(shaderProgram, Len, &Len, Buffer);
std::cout << "Shader Log:" << std::endl << Buffer << std::endl;
delete[] Buffer;
// Create VAO. Don't forget to enable all necessary states because the VAO starts with default state, cleaning all states prev called to do so.
GLuint VaoId;
glGenVertexArrays(1, &VaoId);
glBindVertexArray(VaoId);
// Bind buffers & set-up VAO vertex pointers
glBindBuffer(GL_ARRAY_BUFFER, player->vboID);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glBindBuffer-vboID: " << error << "\n"; }
glEnableClientState(GL_VERTEX_ARRAY);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * GL_FLOAT, (const GLvoid *)0);
glEnableVertexAttribArray(0);
// Set-up VAO normal pointers
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glBindBuffer-vbo init: " << error << "\n"; }
glEnableClientState(GL_NORMAL_ARRAY);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * GL_FLOAT, (void*)(3 * sizeof(GL_FLOAT)));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, player->vboIndexID);
GLint maxLength, nAttribs;
glGetProgramiv(shaderProgram, GL_ACTIVE_ATTRIBUTES, &nAttribs);
glGetProgramiv(shaderProgram, GL_ACTIVE_ATTRIBUTES, &maxLength);
//std::cout << "\nmax length: " << maxLength << "\nnAttribs: " << nAttribs;
glBindVertexArray(0);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glBindVertexArray: " << error << "\n"; };
// End VAO init
while (1){
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glClearColor: " << error << "\n"; };
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error in glClear: " << error << "\n"; };
glLoadIdentity();
glUseProgram(shaderProgram);
glBindVertexArray(VaoId);
glDrawElements(GL_TRIANGLES, size, GL_UNSIGNED_SHORT, 0);
glUseProgram(0);
glBindVertexArray(0);
SDL_GL_SwapWindow(displayWindow);
count -= .1;
}
SDL_Delay(5000);
SDL_Quit();
return 0;
}
The shader code:
Vertex shader:
#version 400
in vec3 in_Position;
in vec3 in_Normal;
void main()
{
gl_Position = vec4(in_Position, 1.0);
}
Fragment shader:
#version 400
out vec4 FragColor;
void main()
{
FragColor = vec4(0.0f, 0.5f, 1.0f, 1.0f);
}
I did look at similar questions here already, and they helped me fix a few possible issues, but so far they haven't gotten my code up and running. I also asked some other people in real-time chat on gamedev.net, but they couldn't see where I went wrong either. I fixed a possible issue with declaring GLdoubles rather than floats, but that part was already working without the VAO and shaders, so it is not (and likely never was) the issue, in whole or in part.

I don't know if any of the following will solve your problem, but I do see some issues in your code:
glEnableClientState(GL_VERTEX_ARRAY);
Here you are mixing the old, deprecated built-in vertex attributes with the generic vertex attributes. You don't need any of these glEnableClientState calls - your shader doesn't use the built-in attributes. The same goes for glLoadIdentity, which is also completely unneeded and would be invalid in a core profile context.
The second thing I see is that you do not specify your attribute indices, so the GL is free to map them however it likes. You also don't query them, but just assume them to be 0 for in_Position and 1 for in_Normal - which is by no means guaranteed to be the case. Use layout(location = ...) qualifiers when declaring the input attributes in your vertex shader to actually define the mapping, or use glBindAttribLocation before linking.
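For example, with explicit locations the vertex shader pins the attributes to the indices the C++ code assumes (a minimal sketch of just the declarations, not the full shader):
#version 400
layout(location = 0) in vec3 in_Position;
layout(location = 1) in vec3 in_Normal;
Alternatively, keep the shader as it is and query the actual indices with glGetAttribLocation after linking, then use whatever values it returns in the glEnableVertexAttribArray/glVertexAttribPointer calls.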

Quickly looking over your code, I am struggling to find where you are sending the buffer data to the GPU.
1. Generate and bind a new buffer.
2. Initialise the buffer to take the data.
3. Send the data using glBufferSubData...
4. Repeat steps 1 through 3 for the element array.
5. Generate and bind the vertex array object.
6. Set up the VertexAttribArray pointers and bind them to your shader.
7. Bind the element buffer once again.
8. Unbind the vertex array using glBindVertexArray(0).
This is how I set up my buffers using OpenTK; the code should be fairly understandable and useful in any case:
// Generate Vertex Buffer Object and bind it so it is current.
GL.GenBuffers(1, out bufferHandle);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferHandle);
// Initialise storage space for the Vertex Buffer.
GL.BufferData(BufferTarget.ArrayBuffer, bufferSize, IntPtr.Zero, BufferUsageHint.StaticDraw);
// Send Position data.
GL.BufferSubData<Vector3>(
BufferTarget.ArrayBuffer, noOffset, new IntPtr(sizeOfPositionData), bufferObject.PositionData);
// Send Normals data, offset by size of Position data.
GL.BufferSubData<Vector3>(
BufferTarget.ArrayBuffer, new IntPtr(sizeOfPositionData), new IntPtr(sizeOfNormalsData), bufferObject.NormalsData);
// Generate Element Buffer Object and bind it so it is current.
GL.GenBuffers(1, out bufferHandle);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, bufferHandle);
GL.BufferData(
BufferTarget.ElementArrayBuffer, new IntPtr(sizeof(uint) * bufferObject.IndicesData.Length), bufferObject.IndicesData, BufferUsageHint.StaticDraw);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.VboID);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, bufferObject.IboID);
// Generate Vertex Array Object and bind it so it is current.
GL.GenVertexArrays(1, out bufferHandle);
GL.BindVertexArray(bufferHandle);
bufferHandle = GL.GetAttribLocation(program, "in_position");
GL.EnableVertexAttribArray(bufferHandle);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.VboID);
GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, true, Vector3.SizeInBytes, 0);
GL.BindAttribLocation(program, bufferHandle, "in_position");
bufferHandle = GL.GetAttribLocation(program, "in_normal");
GL.EnableVertexAttribArray(bufferHandle);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.VboID);
GL.VertexAttribPointer(1, 3, VertexAttribPointerType.Float, true, Vector3.SizeInBytes, sizeOfPositionData);
GL.BindAttribLocation(program, bufferHandle, "in_normal");
GL.BindBuffer(BufferTarget.ElementArrayBuffer, bufferObject.IboID);
// IMPORTANT: vertex array needs unbinding here to avoid rendering incorrectly
GL.BindVertexArray(0);
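For reference, the same sequence with the raw GL calls you are already using would look roughly like this. This is a sketch only; vertexData and indexData (and their sizes) are placeholders for whatever your MainChar class actually stores, not code from the question:
GLuint vbo, ibo, vao;
// 1-3: generate the vertex buffer, allocate storage, then upload the data
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexData), nullptr, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertexData), vertexData);
// 4: same again for the element (index) buffer
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indexData), indexData, GL_STATIC_DRAW);
// 5-7: create the VAO, record the attribute pointers and the element buffer binding
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(GLfloat), (void*)(3 * sizeof(GLfloat)));
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
// 8: unbind the VAO so later state changes cannot alter it
glBindVertexArray(0);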

Well, after sitting down and reading the docs for version 4.0, I learned that I had screwed up my attrib pointers by passing an incorrect stride and incorrect pointers to the start of the buffer data. My thought was that the stride should be the size of the element type multiplied by the number of attribute elements, so you'd land on the next attribute you were looking for. Obviously that is not what you are supposed to do. I changed it to zero, since my attribs are back to back:
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * GL_FLOAT, (void*)(3 * sizeof(GL_FLOAT)));"
-->
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)(3 * sizeof(GL_FLOAT)));"
Then there is the pointer, which I tried to handle in almost exactly the same way. It should have been a null pointer for the first attribute's location in the buffer:
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)(3 * sizeof(GL_FLOAT)));"
-->
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (GLubyte *)NULL);"
Once I actually sat down and read the docs closely, I understood what belonged there. Now the shaders are working and I can work on the cool stuff... :P Thanks for the efforts to answer my question anyway, everyone. :)
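For anyone landing here with the same problem: GL_FLOAT is an enum constant (0x1406), not sizeof(float), so 6 * GL_FLOAT produces a huge, meaningless stride. For an interleaved buffer (three position floats followed by three normal floats per vertex) the usual calls would be something like the sketch below; a stride of 0 is only equivalent when each attribute is tightly packed on its own:
// 6 floats per vertex: position (x, y, z) followed by normal (nx, ny, nz)
GLsizei stride = 6 * sizeof(GLfloat);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, (void*)0);                     // positions start at byte 0
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, stride, (void*)(3 * sizeof(GLfloat))); // normals start after 3 floats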

Related

Simple opengl triangle function not drawing anything to screen

I am just getting started with OpenGL, and have already hit a pretty frustrating bug with it. I've followed the learnopengl tutorial, encapsulating most stuff into a renderer class, which has uints for buffers and such. Here is the main code that does everything:
#include <gfx/gfx.h>
#include <gfx/gl.h>
#include <gfx/shaders.h>
#include <iostream>
void Renderer::init() {
vertex_shader_id = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader_id, 1, &vertex_shader, nullptr);
glCompileShader(vertex_shader_id);
GLint vertex_shader_status;
glGetShaderiv(vertex_shader_id, GL_COMPILE_STATUS, &vertex_shader_status);
if (vertex_shader_status == false) {
std::cout << "vsh compilation failed due to";
char vertex_fail_info_log[1024];
glGetShaderInfoLog(vertex_shader_id, 1024, nullptr, vertex_fail_info_log);
std::cout << vertex_fail_info_log << std::endl;
abort();
}
fragment_shader_id = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader_id, 1, &fragment_shader, nullptr);
glCompileShader(fragment_shader_id);
GLint fragment_shader_status;
glGetShaderiv(fragment_shader_id, GL_COMPILE_STATUS, &fragment_shader_status);
if (fragment_shader_status == false) {
std::cout << "fsh compilation failed due to";
char fragment_fail_info_log[1024];
glGetShaderInfoLog(fragment_shader_id, 1024, nullptr, fragment_fail_info_log);
std::cout << fragment_fail_info_log << std::endl;
abort();
}
shader_program = glCreateProgram();
glAttachShader(shader_program, vertex_shader_id);
glAttachShader(shader_program, fragment_shader_id);
glLinkProgram(shader_program);
GLint shader_program_status;
glGetProgramiv(shader_program, GL_LINK_STATUS, &shader_program_status);
if (shader_program_status == false) {
std::cout << "shprogram compilation failed due to";
char shader_program_fail_info_log[1024];
glGetShaderInfoLog(shader_program, 1024, nullptr, shader_program_fail_info_log);
std::cout << shader_program_fail_info_log << std::endl;
abort();
}
glUseProgram(shader_program);
glDeleteShader(vertex_shader_id);
glDeleteShader(fragment_shader_id);
}
void Renderer::draw(f32 verts[]) {
glUseProgram(shader_program);
glClearColor(1, 0, 0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glCreateVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
glCreateBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(f32), (void*)0);
glEnableVertexAttribArray(0);
glBindVertexArray(vertex_array);
glUseProgram(shader_program);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
Here is shaders.h :
#ifndef SHADERS_H
#define SHADERS_H
const char* vertex_shader =
"#version 460 core\n"
"layout (location = 0) in vec3 aPos;\n"
"void main() {\n"
"gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n"
"}\n\0";
const char* fragment_shader =
"#version 460 core\n"
"out vec4 FragColor;\n"
"void main() {\n"
"FragColor = vec4(0.0f, 1.0f, 0.0f, 1.0f);\n"
"}\n\0";
#endif
I cannot figure out, for the life of me, what is wrong. The red clear color shows up, but nothing else.
Looks like there's a problem with your draw method. The signature is void Renderer::draw(f32 verts[]), and later on you call glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW). The thing is, when you pass an array to a function, it decays to a pointer (even though the declaration can make it look like that's not happening, which is very confusing). So as a function parameter, draw(f32 verts[]) is equivalent to draw(f32* verts). This question has some explanations of what's happening there.
Anyway, when you call sizeof(verts), you're just getting the size of a float pointer, not the number of bytes owned by verts. So you will not be specifying enough bytes when you call glBufferData() to create the triangle you are going for. The simple fix is to pass a length into your draw function, and then you would have something like
void Renderer::draw(f32* verts, int length) {
//...
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * length, verts, GL_STATIC_DRAW);
//...
}
Here are the docs on this particular function. It is possible there are other errors, but since you aren't black-screening and the code generally looks right, it is unlikely there is a bunch of invalid operations or anything.
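A call site would then pass the element count along with the pointer, for example (hypothetical triangle data and a hypothetical Renderer instance named renderer, not taken from the question):
f32 triangle_verts[] = {
    -0.5f, -0.5f, 0.0f,
     0.5f, -0.5f, 0.0f,
     0.0f,  0.5f, 0.0f,
};
// sizeof works here because triangle_verts is a real array, not a decayed pointer
renderer.draw(triangle_verts, sizeof(triangle_verts) / sizeof(f32));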
To continue debugging after this, add the following to your code
#define GL_ERROR_CHECK() (log_error(__FILE__, __LINE__))
void log_error(const char* file, int line) {
GLenum err;
while((err = glGetError()) != GL_NO_ERROR) {
std::cout << "GL error " << err << " in " << file << " at line " << line << std::endl;
}
}
and sprinkle GL_ERROR_CHECK() all over the place to see if any of the OpenGL calls were invalid.

OpenGL ES 2.0: Why is my VBO not working?

Hey there, I'm trying to draw a simple quad made of 2 triangles in OpenGL through a VBO.
However, I have looked at my code multiple times and I don't see what I am missing.
I am no OpenGL expert. The code was working fine without any buffers, but when I switched to a VBO I stopped seeing anything. OpenGL also doesn't provide any helpful errors.
Image::Image(Graphics * GFX)
{
glm::vec2 corners[2];
corners[0] = glm::vec2(0, 0);
corners[1] = glm::vec2(1, 1);
vertices = new GLfloat[30]
{
//Vertices XYZ TexCoord X,Y
corners[0].x, corners[0].y, 0.0f, 0,0,
corners[0].x, corners[1].y, 0.0f, 0,1,
corners[1].x, corners[0].y, 0.0f, 1,0,
corners[1].x,corners[1].y,0.0f, 1,1,
corners[0].x, corners[1].y,0.0f, 0,1,
corners[1].x, corners[0].y, 0.0f, 1,0
};
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
//Setting up some stuff
const char *vertexShaderSource =
"attribute vec4 vPosition; \n"
"attribute vec2 vTexCoord; \n"
"void main() \n"
"{ \n"
" gl_Position = vPosition; \n"
"} \n";
const char *fragmentShaderSource =
"precision mediump float; \n"
"void main() \n"
"{ \n"
" gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0); \n"
"} \n";
// Load and compile the vertex/fragment shaders
vertexShader = GFX->LoadShader(GL_VERTEX_SHADER, (const char*)vertexShaderSource);
fragmentShader = GFX->LoadShader(GL_FRAGMENT_SHADER, (const char*)fragmentShaderSource);
// Create the program object
programObject = glCreateProgram();
// now we have the V and F shaders attach them to the progam object
glAttachShader(programObject, vertexShader);
glAttachShader(programObject, fragmentShader);
// Link the program
glLinkProgram(programObject);
// Check the link status
// Link the program
GLint AreTheylinked;
glGetProgramiv(programObject, GL_LINK_STATUS, &AreTheylinked);
if (!AreTheylinked)
{
GLint RetinfoLen = 0;
// check and report any errors
glGetProgramiv(programObject, GL_INFO_LOG_LENGTH, &RetinfoLen);
if (RetinfoLen > 1)
{
GLchar* infoLog = (GLchar*)malloc(sizeof(char) * RetinfoLen);
glGetProgramInfoLog(programObject, RetinfoLen, NULL, infoLog);
fprintf(stderr, "Error linking program:\n%s\n", infoLog);
free(infoLog);
}
glDeleteProgram(programObject);
}
positionLocation = glGetAttribLocation(programObject, "vPosition");
textureCoordLocation = glGetAttribLocation(programObject, "vTexCoord");
if (glGetError() == GL_NO_ERROR) {}
else
printf("Init failed!\n");
//End setting up
}
Image::~Image()
{
}
void Image::Draw()
{
std::cout << "Calling Draw on Image" << std::endl;
glUseProgram(programObject);
glEnable(GL_DEPTH_TEST);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
GLsizei stride = (5) * sizeof(GLfloat);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0);
glEnableVertexAttribArray(0);
glDrawArrays(GL_TRIANGLES, 0, 6);
if (glGetError() != GL_NO_ERROR)
{
printf("UI Draw error\n");
}
glDisableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
And how I am loading the shaders:
GLuint Graphics::LoadShader(GLenum type, const char *shaderSrc)
{
// 1st create the shader object
GLuint TheShader = glCreateShader(type);
if (TheShader == 0) return FALSE; // can't allocate so stop.
// pass the shader source then compile it
glShaderSource(TheShader, 1, &shaderSrc, NULL);
glCompileShader(TheShader);
GLint IsItCompiled;
// After the compile we need to check the status and report any errors
glGetShaderiv(TheShader, GL_COMPILE_STATUS, &IsItCompiled);
if (!IsItCompiled)
{
GLint RetinfoLen = 0;
glGetShaderiv(TheShader, GL_INFO_LOG_LENGTH, &RetinfoLen);
if (RetinfoLen > 1)
{ // standard output for errors
char* infoLog = (char*)malloc(sizeof(char) * RetinfoLen);
glGetShaderInfoLog(TheShader, RetinfoLen, NULL, infoLog);
fprintf(stderr, "Error compiling this shader:\n%s\n", infoLog);
free(infoLog);
}
glDeleteShader(TheShader);
return FALSE;
}
return TheShader;
}
It worked fine without a buffer previously, and I saw a white square by using:
glVertexAttribPointer(0, 6, GL_FLOAT, GL_FALSE, 0, vertices);
But now that I want to add texture coordinates to my quad through a VBO, nothing shows up anymore.
Your buffer creation looks fine - the problem is your attribute pointer setup. You've added texture coordinates, which changes the data layout for each vertex, but you've not handled that in your code.
You need something like this:
glVertexAttribPointer(positionLocation, 3, GL_FLOAT, GL_FALSE, stride, 0);
glVertexAttribPointer(textureCoordLocation, 2, GL_FLOAT, GL_FALSE, stride, (void*)(3 * sizeof(GLfloat)));
... not this:
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, stride, 0);
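Spelling the same thing out with the offsets computed from the five-float layout (3 position floats, then 2 texture coordinates), and remembering that both arrays have to be enabled, the draw path would look roughly like this (a sketch, assuming both attribute locations were actually found):
glBindBuffer(GL_ARRAY_BUFFER, vbo);
GLsizei stride = 5 * sizeof(GLfloat);
glEnableVertexAttribArray(positionLocation);
glVertexAttribPointer(positionLocation, 3, GL_FLOAT, GL_FALSE, stride, (void*)0);
glEnableVertexAttribArray(textureCoordLocation);
glVertexAttribPointer(textureCoordLocation, 2, GL_FLOAT, GL_FALSE, stride, (void*)(3 * sizeof(GLfloat)));
glDrawArrays(GL_TRIANGLES, 0, 6);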

GtkGLArea clears background but does not draw

I have been writing a simple GTK+ application and am just getting started with graphical development. I understand that this may not be a good place to start, jumping straight into 3D rendering, but I've done a small amount of it before and with great success using Glade and reading a plethora of docs, I figured it would not be hard to integrate the two - I figured incorrectly. The problem at hand is that glDrawArrays appears to not be working. I looked at this question and unfortunately, it did not help me. I followed some of this tutorial on OpenGL and also this tutorial on GtkGLArea again to no avail.
Can anyone point me in the right direction on this one? I'm not sure where to go from here.
The relevant code is below:
#include "RenderingManager.hpp"
RenderingManager::RenderingManager() {
///GTK+ Setup///
std::cout << "starting render constructor" << std::endl;
glArea = GTK_GL_AREA(gtk_gl_area_new());
std::cout << "got new glarea" << std::endl;
g_signal_connect(GTK_WIDGET(glArea), "render", G_CALLBACK(signal_render), this);
g_signal_connect(GTK_WIDGET(glArea), "realize", G_CALLBACK(signal_realize), this);
g_signal_connect(GTK_WIDGET(glArea), "unrealize", G_CALLBACK(signal_unrealize), this);
gtk_widget_show(GTK_WIDGET(glArea));
///Get Shaders///
// vshader.open("vertex.shader");
// fshader.open("fragment.shader");
std::cout << "finished render constructor" << std::endl;
}
void RenderingManager::onRender() {
// Dark blue background
glClearColor(0.1f, 0.0f, 0.1f, 0.0f);
draw_triangle();
glFlush();
}
void RenderingManager::initBuffers () {
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
}
void RenderingManager::loadShaders() {
// Read the Vertex Shader code from the file
std::ifstream VertexShaderStream("vertex.shader", std::ios::in);
if(VertexShaderStream.is_open()){
std::string Line = "";
while(getline(VertexShaderStream, Line))
vshader += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::ifstream FragmentShaderStream("fragment.shader", std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(getline(FragmentShaderStream, Line))
fshader += "\n" + Line;
FragmentShaderStream.close();
}
GLuint vsh, fsh;
vsh = glCreateShader(GL_VERTEX_SHADER);
fsh = glCreateShader(GL_FRAGMENT_SHADER);
vshp = vshader.data();
fshp = fshader.data();
// vshp = vshader.get().c_str();
// fshp = fshader.get().c_str();
// vshader.get(vshp);
// fshader.get(fshp);
printf("%s\n%s\n", vshp, fshp);
glShaderSource(vsh, 1, &vshp, NULL);
glShaderSource(fsh, 1, &fshp, NULL);
glCompileShader(vsh);
glCompileShader(fsh);
shaderProgramID = glCreateProgram();
glAttachShader(shaderProgramID, vsh);
glAttachShader(shaderProgramID, fsh);
glLinkProgram(shaderProgramID);
GLint Result = GL_FALSE;
int InfoLogLength;
// Check Vertex Shader
glGetShaderiv(vsh, GL_COMPILE_STATUS, &Result);
glGetShaderiv(vsh, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
char* VertexShaderErrorMessage = new char[InfoLogLength+1];
glGetShaderInfoLog(vsh, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
// Check Fragment Shader
glGetShaderiv(fsh, GL_COMPILE_STATUS, &Result);
glGetShaderiv(fsh, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
char* FragmentShaderErrorMessage = new char[InfoLogLength+1];
glGetShaderInfoLog(fsh, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
}
void RenderingManager::onActivate() {
// We need to make the context current if we want to
// call GL API
gtk_gl_area_make_current (glArea);
glewExperimental = GL_TRUE;
glewInit();
loadShaders();
initBuffers();
}
void RenderingManager::signal_render(GtkGLArea *a, gpointer *user_data) {
reinterpret_cast<RenderingManager*>(user_data)->onRender();
}
void RenderingManager::signal_realize(GtkGLArea *a, gpointer *user_data) {
reinterpret_cast<RenderingManager*>(user_data)->onActivate();
}
void RenderingManager::signal_unrealize(GtkGLArea *a, gpointer *user_data) {
//Don't do this
//reinterpret_cast<RenderingManager*>(user_data)->~RenderingManager();
}
void RenderingManager::draw_triangle() {
// Clear the screen
glClear( GL_COLOR_BUFFER_BIT );
// Use our shader
glUseProgram(shaderProgramID);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // 3 indices starting at 0 -> 1 triangle
glDisableVertexAttribArray(0);
}
GtkGLArea *RenderingManager::expose() {
//yikes
return glArea;
}
RenderingManager::~RenderingManager() {
glDeleteBuffers(1, &vbo);
glDeleteVertexArrays(1, &vao);
glDeleteProgram(shaderProgramID);
std::cout << "GL Resources deleted." << std::endl;
}
Due to the asynchronous nature of X11 (which GTK+ uses), the GL context can't be created before the window is realized (i.e. before a connection to X11 has been made).
Create the GL context in your signal_realize() handler and make it current before drawing; the drawing itself should be done when handling the expose_event signal (GTK+ 2) or the draw signal (GTK+ 3).
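A minimal sketch of that structure for a GtkGLArea (the handler names here are illustrative, not from the question):
static void on_realize(GtkGLArea *area, gpointer user_data) {
    gtk_gl_area_make_current(area);            // the context only exists once the widget is realized
    if (gtk_gl_area_get_error(area) != NULL)
        return;                                // context creation failed
    // glewInit(), shader loading and buffer creation are safe from this point on
}

static gboolean on_render(GtkGLArea *area, GdkGLContext *context, gpointer user_data) {
    glClearColor(0.1f, 0.0f, 0.1f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    // issue draw calls here; GtkGLArea presents the frame after the handler returns
    return TRUE;
}

// wiring the handlers up, e.g. right after gtk_gl_area_new():
g_signal_connect(glArea, "realize", G_CALLBACK(on_realize), NULL);
g_signal_connect(glArea, "render",  G_CALLBACK(on_render),  NULL);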

openGL migration from SFML to glut, vertices arrays or display lists are not displayed

Due to using quad-buffered stereo 3D (which I have not included yet), I need to migrate my OpenGL program from an SFML window to a GLUT window.
With SFML my vertices and display lists were properly displayed; now with GLUT my window is blank white (or another color, depending on the way I clear it).
Here is the code to initialise the window:
int type;
int stereoMode = 0;
if ( stereoMode == 0 )
type = GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH;
else
type = GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH | GLUT_STEREO;
glutInitDisplayMode(type);
int argc = 0;
char *argv = "";
glewExperimental = GL_TRUE;
glutInit(&argc, &argv);
bool fullscreen = false;
glutInitWindowSize(width,height);
int win = glutCreateWindow(title.c_str());
glutSetWindow(win);
assert(win != 0);
if ( fullscreen ) {
glutFullScreen();
width = glutGet(GLUT_SCREEN_WIDTH);
height = glutGet(GLUT_SCREEN_HEIGHT);
}
GLenum err = glewInit();
if (GLEW_OK != err) {
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
}
glutDisplayFunc(loop_function);
This is the only code I had to change for now. Here is the code I used with SFML to display my objects in the loop; if I change the value of glClearColor, the window's background does change color, so the OpenGL context seems to be working:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearColor(255.0f, 255.0f, 255.0f, 0.0f);
glLoadIdentity();
sf::Time elapsed_time = clock.getElapsedTime();
clock.restart();
camera->animate(elapsed_time.asMilliseconds());
camera->look();
for (auto i = objects->cbegin(); i != objects->cend(); ++i)
(*i)->draw(camera);
glutSwapBuffers();
Are there any other changes I should have made when switching to GLUT? It would be great if someone could enlighten me on the subject.
In addition to that, I found out that when adding too many objects (which were handled fine before with SFML), OpenGL gives error 1285: out of memory. Maybe this is related.
EDIT:
Here is the code I use to draw each object; maybe it is the problem:
GLuint LightID = glGetUniformLocation(this->shaderProgram, "LightPosition_worldspace");
if(LightID ==-1)
cout << "LightID not found ..." << endl;
GLuint MaterialAmbientID = glGetUniformLocation(this->shaderProgram, "MaterialAmbient");
if(LightID ==-1)
cout << "LightID not found ..." << endl;
GLuint MaterialSpecularID = glGetUniformLocation(this->shaderProgram, "MaterialSpecular");
if(LightID ==-1)
cout << "LightID not found ..." << endl;
glm::vec3 lightPos = glm::vec3(0,150,150);
glUniform3f(LightID, lightPos.x, lightPos.y, lightPos.z);
glUniform3f(MaterialAmbientID, MaterialAmbient.x, MaterialAmbient.y, MaterialAmbient.z);
glUniform3f(MaterialSpecularID, MaterialSpecular.x, MaterialSpecular.y, MaterialSpecular.z);
// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(shaderProgram, "myTextureSampler");
if(!TextureID)
cout << "TextureID not found ..." << endl;
glActiveTexture(GL_TEXTURE0);
sf::Texture::bind(texture);
glUniform1i(TextureID, 0);
// 2nd attribute buffer : UV
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID==-1)
cout << "vertexUVID not found ..." << endl;
glEnableVertexAttribArray(vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
GLuint vertexNormal_modelspaceID = glGetAttribLocation(shaderProgram, "normal");
if(!vertexNormal_modelspaceID)
cout << "vertexNormal_modelspaceID not found ..." << endl;
glEnableVertexAttribArray(vertexNormal_modelspaceID);
glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
glVertexAttribPointer(vertexNormal_modelspaceID, 3, GL_FLOAT, GL_FALSE, 0, 0 );
GLint posAttrib;
posAttrib = glGetAttribLocation(shaderProgram, "position");
if(!posAttrib)
cout << "posAttrib not found ..." << endl;
glEnableVertexAttribArray(posAttrib);
glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
glVertexAttribPointer(posAttrib, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
GLuint error;
while ((error = glGetError()) != GL_NO_ERROR) {
cerr << "OpenGL error: " << error << endl;
}
disableShaders();
The code is fine; migrating from SFML to GLUT doesn't need a lot of changes, but you will have to change the textures if you used the SFML texture object. The most likely reason you are not seeing anything other than your background changing color is simply that your camera is not looking at your object.
I advise you to check the code of your view and/or post it.

OpenGL glGetError 1281 bad value

I am using OpenGL with vertices and shaders. Nothing got displayed on my screen, so I used glGetError to debug: I got an error 1281 (bad value) on one of my buffers, called color_array_buffer. Here is the section I am talking about:
GLenum error = glGetError();
if(error) {
cout << error << endl;
return ;
} else {
cout << "no error yet" << endl;
}
//no error
// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(shaderProgram, "myTextureSampler");
if(!TextureID)
cout << "TextureID not found ..." << endl;
// Bind our texture in Texture Unit 0
glActiveTexture(GL_TEXTURE0);
sf::Texture::bind(texture);
// Set our "myTextureSampler" sampler to user Texture Unit 0
glUniform1i(TextureID, 0);
// 2nd attribute buffer : UVs
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(!vertexUVID)
cout << "vertexUVID not found ..." << endl;
glEnableVertexAttribArray(vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
error = glGetError();
if(error) {
cout << error << endl;
return ;
}
//error 1281
And here is the code where I link my buffer to the array:
if (textured) {
texture = new sf::Texture();
if(!texture->loadFromFile("textures/simple.jpeg"/*,sf::IntRect(0, 0, 128, 128)*/))
std::cout << "Error loading texture !!" << std::endl;
glGenBuffers(1, &color_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec3), &uvs[0], GL_STATIC_DRAW);
}
And my values of uvs:
uvs[0] : 0.748573-0.750412
uvs[1] : 0.749279-0.501284
uvs[2] : 0.99911-0.501077
uvs[3] : 0.999455-0.75038
uvs[4] : 0.250471-0.500702
uvs[5] : 0.249682-0.749677
uvs[6] : 0.001085-0.75038
uvs[7] : 0.001517-0.499994
uvs[8] : 0.499422-0.500239
uvs[9] : 0.500149-0.750166
uvs[10] : 0.748355-0.99823
uvs[11] : 0.500193-0.998728
uvs[12] : 0.498993-0.250415
uvs[13] : 0.748953-0.25092
Am I doing something wrong? If someone could help me, that would be great.
Your check for glGetAttribLocation() failing to find the attribute is incorrect:
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(!vertexUVID)
cout << "vertexUVID not found ..." << endl;
glGetAttribLocation() returns a GLint (not GLuint), and the result is -1 if an attribute with the given name is not found in the program. Since you assign the value to an unsigned variable, it will end up being the largest possible unsigned value, which is then an invalid argument when you pass it to glEnableVertexAttribArray() afterwards.
Your code should look like this instead:
GLint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID < 0)
cout << "vertexUVID not found ..." << endl;
Note that 0 is a perfectly valid attribute location.
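A small sketch of the corrected pattern in context (using the same names as the question):
GLint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if (vertexUVID < 0) {
    cout << "vertexUVID not found ..." << endl;   // attribute is inactive or misspelled
} else {
    glEnableVertexAttribArray((GLuint)vertexUVID);
    glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
    glVertexAttribPointer((GLuint)vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
}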