OpenGL glGetError 1281 bad value - c++

I am using OpenGL with vertices and shaders; nothing got displayed on my screen, so I used glGetError to debug: I got error 1281 (bad value) on one of my buffers, called color_array_buffer. Here is the section I am talking about:
GLenum error = glGetError();
if(error) {
cout << error << endl;
return ;
} else {
cout << "no error yet" << endl;
}
//no error
// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(shaderProgram, "myTextureSampler");
if(!TextureID)
cout << "TextureID not found ..." << endl;
// Bind our texture in Texture Unit 0
glActiveTexture(GL_TEXTURE0);
sf::Texture::bind(texture);
// Set our "myTextureSampler" sampler to use Texture Unit 0
glUniform1i(TextureID, 0);
// 2nd attribute buffer : UVs
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(!vertexUVID)
cout << "vertexUVID not found ..." << endl;
glEnableVertexAttribArray(vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
error = glGetError();
if(error) {
cout << error << endl;
return ;
}
//error 1281
And here is the code where I link my buffer to the array:
if (textured) {
texture = new sf::Texture();
if(!texture->loadFromFile("textures/simple.jpeg"/*,sf::IntRect(0, 0, 128, 128)*/))
std::cout << "Error loading texture !!" << std::endl;
glGenBuffers(1, &color_array_buffer);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glBufferData(GL_ARRAY_BUFFER, uvs.size() * sizeof(glm::vec3), &uvs[0], GL_STATIC_DRAW);
}
And my values of uvs:
uvs[0] : 0.748573-0.750412
uvs[1] : 0.749279-0.501284
uvs[2] : 0.99911-0.501077
uvs[3] : 0.999455-0.75038
uvs[4] : 0.250471-0.500702
uvs[5] : 0.249682-0.749677
uvs[6] : 0.001085-0.75038
uvs[7] : 0.001517-0.499994
uvs[8] : 0.499422-0.500239
uvs[9] : 0.500149-0.750166
uvs[10] : 0.748355-0.99823
uvs[11] : 0.500193-0.998728
uvs[12] : 0.498993-0.250415
uvs[13] : 0.748953-0.25092
Am I doing something wrong? If someone could help me, that would be great.

Your check for glGetAttribLocation() failing to find the attribute is incorrect:
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(!vertexUVID)
cout << "vertexUVID not found ..." << endl;
glGetAttribLocation() returns a GLint (not a GLuint), and the result is -1 if an attribute with the given name is not found in the program. Since you assign the value to an unsigned variable, -1 wraps around to the largest possible unsigned value, which is then an invalid argument (hence the GL_INVALID_VALUE error) if you pass it to glEnableVertexAttribArray() afterwards.
Your code should look like this instead:
GLint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID < 0)
cout << "vertexUVID not found ..." << endl;
Note that 0 is a perfectly valid attribute location.
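For completeness, a minimal sketch of how the corrected lookup could then feed into the rest of your attribute setup (the early return is illustrative; the cast is only done once the location is known to be valid):
GLint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID < 0) {
    cout << "vertexUVID not found ..." << endl;
    return; // don't pass an invalid location to glEnableVertexAttribArray
}
glEnableVertexAttribArray((GLuint)vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer((GLuint)vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
The same caveat applies to glGetUniformLocation(): it also returns a GLint, with -1 meaning "not found", so a check like if(!TextureID) would wrongly reject the perfectly valid location 0.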

Related

Simple opengl triangle function not drawing anything to screen

I am just getting started with OpenGL, and have already hit a pretty frustrating bug with it. I've followed the learnopengl tutorial, encapsulating most stuff into a renderer class, which has uints for buffers and such. Here is the main code that does everything:
#include <gfx/gfx.h>
#include <gfx/gl.h>
#include <gfx/shaders.h>
#include <iostream>
void Renderer::init() {
vertex_shader_id = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader_id, 1, &vertex_shader, nullptr);
glCompileShader(vertex_shader_id);
GLint vertex_shader_status;
glGetShaderiv(vertex_shader_id, GL_COMPILE_STATUS, &vertex_shader_status);
if (vertex_shader_status == false) {
std::cout << "vsh compilation failed due to";
char vertex_fail_info_log[1024];
glGetShaderInfoLog(vertex_shader_id, 1024, nullptr, vertex_fail_info_log);
std::cout << vertex_fail_info_log << std::endl;
abort();
}
fragment_shader_id = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader_id, 1, &fragment_shader, nullptr);
glCompileShader(fragment_shader_id);
GLint fragment_shader_status;
glGetShaderiv(fragment_shader_id, GL_COMPILE_STATUS, &fragment_shader_status);
if (fragment_shader_status == false) {
std::cout << "fsh compilation failed due to";
char fragment_fail_info_log[1024];
glGetShaderInfoLog(fragment_shader_id, 1024, nullptr, fragment_fail_info_log);
std::cout << fragment_fail_info_log << std::endl;
abort();
}
shader_program = glCreateProgram();
glAttachShader(shader_program, vertex_shader_id);
glAttachShader(shader_program, fragment_shader_id);
glLinkProgram(shader_program);
GLint shader_program_status;
glGetProgramiv(shader_program, GL_LINK_STATUS, &shader_program_status);
if (shader_program_status == false) {
std::cout << "shprogram compilation failed due to";
char shader_program_fail_info_log[1024];
glGetShaderInfoLog(shader_program, 1024, nullptr, shader_program_fail_info_log);
std::cout << shader_program_fail_info_log << std::endl;
abort();
}
glUseProgram(shader_program);
glDeleteShader(vertex_shader_id);
glDeleteShader(fragment_shader_id);
}
void Renderer::draw(f32 verts[]) {
glUseProgram(shader_program);
glClearColor(1, 0, 0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glCreateVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
glCreateBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(f32), (void*)0);
glEnableVertexAttribArray(0);
glBindVertexArray(vertex_array);
glUseProgram(shader_program);
glDrawArrays(GL_TRIANGLES, 0, 3);
}
Here is shaders.h :
#ifndef SHADERS_H
#define SHADERS_H
const char* vertex_shader =
"#version 460 core\n"
"layout (location = 0) in vec3 aPos;\n"
"void main() {\n"
"gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n"
"}\n\0";
const char* fragment_shader =
"#version 460 core\n"
"out vec4 FragColor;\n"
"void main() {\n"
"FragColor = vec4(0.0f, 1.0f, 0.0f, 1.0f);\n"
"}\n\0";
#endif
I cannot figure out, for the life of me, what is wrong. The red clear color shows up, but nothing else.
Looks like there's a problem with your draw method. The signature is void Renderer::draw(f32 verts[]), and later on you call glBufferData(GL_ARRAY_BUFFER, sizeof(verts), verts, GL_STATIC_DRAW). The thing is, when you pass an array to a function, it decays to a pointer (even though the declaration can make it look like that's not happening, which is very confusing). So as a function parameter, draw(f32 verts[]) is equivalent to draw(f32* verts). This question has some explanations of what's happening there.
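As a small standalone illustration of that decay (not part of the asker's code):
#include <iostream>

void take_array(float verts[]) {          // really: void take_array(float* verts)
    std::cout << sizeof(verts) << "\n";   // size of a pointer, typically 8 on 64-bit
}

int main() {
    float data[9] = {};
    std::cout << sizeof(data) << "\n";    // 36: the whole array, 9 * sizeof(float)
    take_array(data);                     // inside the function that information is gone
}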
Anyways, when you call sizeof(verts), you're just getting the number of bytes of a float pointer, not the number of bytes owned by verts. So you will not be specifying enough bytes when you call glBufferData() to create the triangle you are going for. The simple fix is to pass a length into your draw function, and then you would have something like
void Renderer::draw(f32* verts, int length) {
//...
glBufferData(GL_ARRAY_BUFFER, sizeof(float) * length, verts, GL_STATIC_DRAW);
//...
}
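A hypothetical call site would then pass the element count explicitly (the renderer object and triangle data here are made up for illustration):
f32 triangle[] = {
    -0.5f, -0.5f, 0.0f,
     0.5f, -0.5f, 0.0f,
     0.0f,  0.5f, 0.0f
};
renderer.draw(triangle, 9); // 9 floats = 3 vertices * 3 components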
Here are some docs on this particular function. It is possible there are other errors, but since you aren't black-screening, and the code generally looks right, it is unlikely there are a bunch of invalid operations or anything.
To continue debugging after this, add the following to your code
#define GL_ERROR_CHECK() (log_error(__FILE__, __LINE__))
void log_error(const char* file, int line) {
GLenum err;
while((err = glGetError()) != GL_NO_ERROR) {
std::cout << "GL error " << err << " in " << file << "at line " << line << std::endl;
}
}
and sprinkle GL_ERROR_CHECK() all over the place to see if any of the OpenGL calls were invalid.
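For example, wrapping the buffer calls from the draw() method above:
glBindBuffer(GL_ARRAY_BUFFER, vbo);
GL_ERROR_CHECK();
glBufferData(GL_ARRAY_BUFFER, sizeof(f32) * length, verts, GL_STATIC_DRAW);
GL_ERROR_CHECK();
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(f32), (void*)0);
GL_ERROR_CHECK();
Any call that raised an error is then followed by a log line giving the file and line number.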

Is deepest mipmap level not an average of all the texels?

I was trying to get an average of all the texels I've drawn in a texture attached to an FBO. The texture has RGBA32F format, so precision loss should be minimal in any case.
For actual computation of the average I thought to use hardware generation of mipmaps, via glGenerateMipmap command, and then get the deepest mipmap level – 1×1.
This works nicely when the texture has power-of-two dimensions. But when it's even one pixel smaller than that, and up until some other size, I get results very far from the average.
See e.g. the following test program:
#include <cassert>
#include <cmath>
#include <vector>
#include <string>
#include <iostream>
// glad.h is generated by the following command:
// glad --out-path=. --generator=c --omit-khrplatform --api="gl=3.3" --profile=core --extensions=
#include "glad/glad.h"
#include <GL/freeglut.h>
#include <glm/glm.hpp>
using glm::vec4;
GLuint vao, vbo;
GLuint texFBO;
GLuint program;
GLuint fbo;
int width=512, height=512;
void getMeanPixelValue(int texW, int texH)
{
// Get average value of the rendered pixels as the value of the deepest mipmap level
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texFBO);
glGenerateMipmap(GL_TEXTURE_2D);
using namespace std;
// Formula from the glspec, "Mipmapping" subsection in section 3.8.11 Texture Minification
const auto totalMipmapLevels = 1+floor(log2(max(texW,texH)));
const auto deepestLevel=totalMipmapLevels-1;
// Sanity check
int deepestMipmapLevelWidth=-1, deepestMipmapLevelHeight=-1;
glGetTexLevelParameteriv(GL_TEXTURE_2D, deepestLevel, GL_TEXTURE_WIDTH, &deepestMipmapLevelWidth);
glGetTexLevelParameteriv(GL_TEXTURE_2D, deepestLevel, GL_TEXTURE_HEIGHT, &deepestMipmapLevelHeight);
assert(deepestMipmapLevelWidth==1);
assert(deepestMipmapLevelHeight==1);
vec4 pixel;
glGetTexImage(GL_TEXTURE_2D, deepestLevel, GL_RGBA, GL_FLOAT, &pixel[0]);
// Get average value in an actual summing loop over all the pixels
std::vector<vec4> data(texW*texH);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, data.data());
vec4 avg(0,0,0,0);
for(auto const& v : data)
avg+=v;
avg/=texW*texH;
std::cerr << "Mipmap value: " << pixel[0] << ", " << pixel[1] << ", " << pixel[2] << ", " << pixel[3] << "\n";
std::cerr << "True average: " << avg[0] << ", " << avg[1] << ", " << avg[2] << ", " << avg[3] << "\n";
}
GLuint makeShader(GLenum type, std::string const& srcStr)
{
const auto shader=glCreateShader(type);
const GLint srcLen=srcStr.size();
const GLchar*const src=srcStr.c_str();
glShaderSource(shader, 1, &src, &srcLen);
glCompileShader(shader);
GLint status=-1;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
assert(glGetError()==GL_NO_ERROR);
assert(status);
return shader;
}
void loadShaders()
{
program=glCreateProgram();
const auto vertexShader=makeShader(GL_VERTEX_SHADER, R"(
#version 330
in vec4 vertex;
void main() { gl_Position=vertex; }
)");
glAttachShader(program, vertexShader);
const auto fragmentShader=makeShader(GL_FRAGMENT_SHADER, R"(
#version 330
out vec4 color;
void main()
{
color.r = gl_FragCoord.y<100 ? 1 : 0;
color.g = gl_FragCoord.y<200 ? 1 : 0;
color.b = gl_FragCoord.y<300 ? 1 : 0;
color.a = gl_FragCoord.y<400 ? 1 : 0;
}
)");
glAttachShader(program, fragmentShader);
glLinkProgram(program);
GLint status=0;
glGetProgramiv(program, GL_LINK_STATUS, &status);
assert(glGetError()==GL_NO_ERROR);
assert(status);
glDetachShader(program, fragmentShader);
glDeleteShader(fragmentShader);
glDetachShader(program, vertexShader);
glDeleteShader(vertexShader);
}
void setupBuffers()
{
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
const GLfloat vertices[]=
{
-1, -1,
1, -1,
-1, 1,
1, 1,
};
glBufferData(GL_ARRAY_BUFFER, sizeof vertices, vertices, GL_STATIC_DRAW);
constexpr GLuint attribIndex=0;
constexpr int coordsPerVertex=2;
glVertexAttribPointer(attribIndex, coordsPerVertex, GL_FLOAT, false, 0, 0);
glEnableVertexAttribArray(attribIndex);
glBindVertexArray(0);
}
void setupRenderTarget()
{
glGenTextures(1, &texFBO);
glGenFramebuffers(1,&fbo);
glBindTexture(GL_TEXTURE_2D,texFBO);
glBindTexture(GL_TEXTURE_2D,0);
}
bool init()
{
if(!gladLoadGL())
{
std::cerr << "Failed to initialize GLAD\n";
return false;
}
if(!GLAD_GL_VERSION_3_3)
{
std::cerr << "OpenGL 3.3 not supported\n";
return false;
}
setupRenderTarget();
loadShaders();
setupBuffers();
return true;
}
bool inited=false;
void reshape(int width, int height)
{
::width=width;
::height=height;
std::cerr << "New size: " << width << "x" << height << "\n";
if(!inited)
{
if(!(inited=init()))
std::exit(1);
}
glViewport(0,0,width,height);
glBindTexture(GL_TEXTURE_2D,texFBO);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA32F,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,nullptr);
glBindTexture(GL_TEXTURE_2D,0);
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,GL_TEXTURE_2D,texFBO,0);
const auto status=glCheckFramebufferStatus(GL_FRAMEBUFFER);
assert(status==GL_FRAMEBUFFER_COMPLETE);
glBindFramebuffer(GL_FRAMEBUFFER,0);
}
void display()
{
if(!inited)
{
if(!(inited=init()))
std::exit(1);
}
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glUseProgram(program);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
getMeanPixelValue(width, height);
// Show the texture on screen
glBindFramebuffer(GL_READ_FRAMEBUFFER,fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER,0);
glBlitFramebuffer(0,0,width,height,0,0,width,height,GL_COLOR_BUFFER_BIT,GL_NEAREST);
glFinish();
}
int main(int argc, char** argv)
{
glutInitContextVersion(3,3);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(width, height);
glutCreateWindow("Test");
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMainLoop();
}
I get the following output when I resize the window vertically:
New size: 512x512
Mipmap value: 0.195312, 0.390625, 0.585938, 0.78125
True average: 0.195312, 0.390625, 0.585938, 0.78125
New size: 512x511
Mipmap value: 0, 0, 1, 1
True average: 0.195695, 0.391389, 0.587084, 0.782779
New size: 512x479
Mipmap value: 0, 0.00123596, 1, 1
True average: 0.208768, 0.417537, 0.626305, 0.835073
New size: 512x453
Mipmap value: 0, 0.125, 1, 1
True average: 0.220751, 0.441501, 0.662252, 0.883002
The above mipmap values are not simply an imprecise average – they are not even close to the corresponding averages, aside from the power-of-two size!
This is on Kubuntu 18.04 with the following information from glxinfo:
Vendor: Intel Open Source Technology Center (0x8086)
Device: Mesa DRI Intel(R) Haswell Server (0x41a)
Version: 18.2.2
So, what's happening here? Is the deepest mipmap level not an average of all the texels in the texture? Or is it simply a bug in the OpenGL implementation?

How can I pass the vertex positions to my vertex shader? I wrote a shader that is not drawing anything to the screen

I wrote a shader program that is not drawing anything to the screen. I think it's because I may have missed something; I don't know how to pass the vertex positions to it.
My vertex shader is:
#version 130
in vec2 vertexPosition;
void main()
{
gl_Position.xy=vertexPosition;
gl_Position.z=-1.0;
gl_Position.w=1.0;
}
My fragment shader is:
#version 130
out vec3 color;
void main()
{
color=vec3(1.0,0.0,0.0);
}
This is the code:
GLfloat triangle []
{
200,200,
400,200,
400,400
};
//translating the coordinates
glViewport(0,0,640,480);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0,640,0,480,0,1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
GLuint triangleBufferID;//store the identifier of this buffer
glGenBuffers(1, &triangleBufferID);
glBindBuffer(GL_ARRAY_BUFFER, triangleBufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(triangle), triangle, GL_STATIC_DRAW); //describe the data in the buffer
glEnableVertexAttribArray(0); //enable the buffer
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, 0); //Get the pointer for the buffer.
//SECOND SHADER TYPE (READ FROM FILE):
const GLchar* vertexshaderPath = "vertexshader.vsh";
const GLchar* fragmentshaderPath = "fragmentshader.fsh";
string vertexshaderSource = ""; //getting a string to store the source code of vertexshader.vsh
string fragmentshaderSource = ""; //getting a string to store the source code of fragmentshader.fsh
ifstream vertexfile; //getting a file pointer for vertexshader.vsh;
ifstream fragmentfile; //getting a file pointer for fragmentshader.fsh;
vertexfile.exceptions (ifstream::badbit); //add exceptions to the file pointer;
fragmentfile.exceptions (ifstream::badbit); //add exceptions to the file pointer;
try
{
vertexfile.open(vertexshaderPath); //open vertexshader.vsh
fragmentfile.open(fragmentshaderPath); //open fragmentshader.fsh
stringstream vfstream, ffstream; //get two stringstream object;
vfstream << vertexfile.rdbuf(); //get the content of vertexshader.vsh into a stringstream;
ffstream << fragmentfile.rdbuf(); //get the content of fragmentshader.fsh into a stringstream;
vertexfile.close(); //close the file;
fragmentfile.close(); //close the file;
vertexshaderSource=vfstream.str(); //copy the string from stringstream into vertexshaderSource;
fragmentshaderSource=ffstream.str(); //copy the string from stringstream into fragmentshaderSource;
}
catch (ifstream::failure e) //if failure caught...
{
cout << "Error, file is unreadable!" << endl;
}
const GLchar* vscode = vertexshaderSource.c_str();
//converted into c_str();
const GLchar* fscode = fragmentshaderSource.c_str();
//converted into c_str();
//THIS PART FOR ALL WAYS:
GLuint vertexshaderID=glCreateShader(GL_VERTEX_SHADER); //create a shader
glShaderSource(vertexshaderID,1,&vscode,nullptr);
glCompileShader(vertexshaderID); //compile shader;
GLint success;
GLchar infolog[512];
glGetShaderiv(vertexshaderID, GL_COMPILE_STATUS, &success);
if(!success) //check the compilation results
{
glGetShaderInfoLog(vertexshaderID,512,0,infolog);
cout << "Error vertex shader's compilation failed" << endl;
cout << infolog << endl;
}
GLuint fragmentshaderID=glCreateShader(GL_FRAGMENT_SHADER); //create a shader
glShaderSource(fragmentshaderID,1,&fscode, nullptr);
glCompileShader(fragmentshaderID); //compile shader
glGetShaderiv(fragmentshaderID,GL_COMPILE_STATUS,&success);
if(!success) //check the compilation results
{
glGetShaderInfoLog(fragmentshaderID,512,0,infolog);
cout << "Error fragment shader's compilation failed" << endl;
cout << infolog << endl;
}
GLuint programID = glCreateProgram(); //create a program;
glAttachShader(programID, vertexshaderID); //attach vertexshader to the program;
glAttachShader(programID, fragmentshaderID); //attach fragmentshader to the program;
glBindAttribLocation(programID, 0, "vertexPosition");
glUniform3f(glGetUniformLocation(programID, "color"),1.0,0.0,0.0);
glLinkProgram(programID); //link the pieces of the program;
glGetProgramiv(programID, GL_LINK_STATUS, &success);
if(!success) //check the link status;
{
glGetProgramInfoLog(programID,512,0,infolog);
cout << "Error linking the program" << endl;
cout << infolog << endl;
}
// glDeleteShader(vertexshaderID);
// glDeleteShader(fragmentshaderID);
glUseProgram(programID); //use the program;
glDrawArrays(GL_TRIANGLES, 0, 3);
SDL_GL_SwapWindow(window);
There are several problems with the code:
There is code used to initialize buffers and shaders mixed with code that has to be called in each frame. For example, shader loading should definitely be done only once, but the draw-calls have to happen every frame.
Then, you are mixing fixed-function code (glMatrixMode and related functions) with core profile code. Most probably the problem is that all the transformations applied here are not used (since the matrix stack is not used when working with shaders in the core profile). Thus the coordinates used (from 200 to 400) are outside of the default view frustum (ranging from -1 to 1). You will have to implement the transformation somewhere in the shader to get a correct projection of your points to NDC; see the sketch after this list.
Minor: There is no guarantee that vertexPosition will be on location 0 (although this might be the case).
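As a rough sketch of that transformation point (the viewportSize uniform is an illustrative name, not something from the original code), the vertex shader could map pixel coordinates to NDC itself:
#version 130
in vec2 vertexPosition;
uniform vec2 viewportSize; // e.g. (640.0, 480.0), set from the application
void main()
{
    // Reproduce what glOrtho(0, w, 0, h, 0, 1) did: map (0..w, 0..h) to (-1..1)
    vec2 ndc = (vertexPosition / viewportSize) * 2.0 - 1.0;
    gl_Position = vec4(ndc, -1.0, 1.0);
}
with the uniform set after glUseProgram(programID), for example:
glUniform2f(glGetUniformLocation(programID, "viewportSize"), 640.0f, 480.0f);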

OpenGL migration from SFML to glut, vertex arrays or display lists are not displayed

Due to using quad-buffered stereo 3D (which I have not included yet), I need to migrate my OpenGL program from an SFML window to a glut window.
With SFML my vertices and display lists were properly displayed; now with glut my window is blank white (or another color depending on the way I clear it).
Here is the code to initialise the window:
int type;
int stereoMode = 0;
if ( stereoMode == 0 )
type = GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH;
else
type = GLUT_DOUBLE | GLUT_RGB | GLUT_DEPTH | GLUT_STEREO;
glutInitDisplayMode(type);
int argc = 0;
char *argv = "";
glewExperimental = GL_TRUE;
glutInit(&argc, &argv);
bool fullscreen = false;
glutInitWindowSize(width,height);
int win = glutCreateWindow(title.c_str());
glutSetWindow(win);
assert(win != 0);
if ( fullscreen ) {
glutFullScreen();
width = glutGet(GLUT_SCREEN_WIDTH);
height = glutGet(GLUT_SCREEN_HEIGHT);
}
GLenum err = glewInit();
if (GLEW_OK != err) {
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
}
glutDisplayFunc(loop_function);
This is the only code I had to change for now, but here is the code I used with SFML to display my objects in the loop. If I change the value of glClearColor, the window's background does change color, so the OpenGL context seems to be working:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glClearColor(255.0f, 255.0f, 255.0f, 0.0f);
glLoadIdentity();
sf::Time elapsed_time = clock.getElapsedTime();
clock.restart();
camera->animate(elapsed_time.asMilliseconds());
camera->look();
for (auto i = objects->cbegin(); i != objects->cend(); ++i)
(*i)->draw(camera);
glutSwapBuffers();
Are there any other changes I should have made when switching to glut? It would be great if someone could enlighten me on the subject.
In addition to that, I found out that when adding too many objects (which were handled fine before with SFML), OpenGL gives error 1285: out of memory. Maybe this is related.
EDIT:
Here is the code I use to draw each object; maybe it is the problem:
GLuint LightID = glGetUniformLocation(this->shaderProgram, "LightPosition_worldspace");
if(LightID ==-1)
cout << "LightID not found ..." << endl;
GLuint MaterialAmbientID = glGetUniformLocation(this->shaderProgram, "MaterialAmbient");
if(LightID ==-1)
cout << "LightID not found ..." << endl;
GLuint MaterialSpecularID = glGetUniformLocation(this->shaderProgram, "MaterialSpecular");
if(LightID ==-1)
cout << "LightID not found ..." << endl;
glm::vec3 lightPos = glm::vec3(0,150,150);
glUniform3f(LightID, lightPos.x, lightPos.y, lightPos.z);
glUniform3f(MaterialAmbientID, MaterialAmbient.x, MaterialAmbient.y, MaterialAmbient.z);
glUniform3f(MaterialSpecularID, MaterialSpecular.x, MaterialSpecular.y, MaterialSpecular.z);
// Get a handle for our "myTextureSampler" uniform
GLuint TextureID = glGetUniformLocation(shaderProgram, "myTextureSampler");
if(!TextureID)
cout << "TextureID not found ..." << endl;
glActiveTexture(GL_TEXTURE0);
sf::Texture::bind(texture);
glUniform1i(TextureID, 0);
// 2nd attribute buffer : UV
GLuint vertexUVID = glGetAttribLocation(shaderProgram, "color");
if(vertexUVID==-1)
cout << "vertexUVID not found ..." << endl;
glEnableVertexAttribArray(vertexUVID);
glBindBuffer(GL_ARRAY_BUFFER, color_array_buffer);
glVertexAttribPointer(vertexUVID, 2, GL_FLOAT, GL_FALSE, 0, 0);
GLuint vertexNormal_modelspaceID = glGetAttribLocation(shaderProgram, "normal");
if(!vertexNormal_modelspaceID)
cout << "vertexNormal_modelspaceID not found ..." << endl;
glEnableVertexAttribArray(vertexNormal_modelspaceID);
glBindBuffer(GL_ARRAY_BUFFER, normal_array_buffer);
glVertexAttribPointer(vertexNormal_modelspaceID, 3, GL_FLOAT, GL_FALSE, 0, 0 );
GLint posAttrib;
posAttrib = glGetAttribLocation(shaderProgram, "position");
if(!posAttrib)
cout << "posAttrib not found ..." << endl;
glEnableVertexAttribArray(posAttrib);
glBindBuffer(GL_ARRAY_BUFFER, position_array_buffer);
glVertexAttribPointer(posAttrib, 3, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, elements_array_buffer);
glDrawElements(GL_TRIANGLES, indices.size(), GL_UNSIGNED_INT, 0);
GLuint error;
while ((error = glGetError()) != GL_NO_ERROR) {
cerr << "OpenGL error: " << error << endl;
}
disableShaders();
The code is fine; migrating from SFML to glut doesn't need a lot of changes, but you will have to change the textures if you used SFML texture objects. The only reason you would not see anything other than your background changing color is simply that your camera is not looking at your object.
I advise you to check the code of your view and/or post it.
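As an illustration only (the asker's camera class isn't shown, so the uniform name and positions below are hypothetical), a typical shader-based view setup with GLM looks something like this:
// requires <glm/gtc/matrix_transform.hpp>
glm::mat4 view = glm::lookAt(
    glm::vec3(0.0f, 150.0f, 400.0f),  // eye position: somewhere that can actually see the object
    glm::vec3(0.0f, 0.0f, 0.0f),      // point being looked at
    glm::vec3(0.0f, 1.0f, 0.0f));     // up vector
glUniformMatrix4fv(glGetUniformLocation(shaderProgram, "view"), 1, GL_FALSE, &view[0][0]);
If camera->look() still drives the fixed-function matrix stack while the shaders read their matrices from uniforms, moving the camera will have no effect on what the shaders draw.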

Use of VAO+IBO with GLSL Shaders

I'm attempting to use VAOs + VBOs + IBOs with shaders, but no object gets drawn. I am not sure what I am missing. I am pretty new to C++ and GLSL, so I am not sure if I am screwing something up with the C++ in general, or if I am failing to handle the OpenGL context correctly.
The main function (code for handling window creation is missing. If you think you may need to review it as well, just let me know.):
int main(int argc, char *argv[])
{
//INIT SDL
SDL_Init(SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(400, 300, SDL_WINDOW_OPENGL, &displayWindow, &displayRenderer);
SDL_GetRendererInfo(displayRenderer, &displayRendererInfo);
/*TODO: Check that we have OpenGL */
if ((displayRendererInfo.flags & SDL_RENDERER_ACCELERATED) == 0 || (displayRendererInfo.flags & SDL_RENDERER_TARGETTEXTURE) == 0) {}
SDL_GL_CreateContext(displayWindow);
//SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
glewInit();
int error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glewInit call: " << error << "\n"; };
//glEnable(GL_DEBUG_OUTPUT);
Display_InitGL();
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during Display init: " << error << "\n"; };
Display_SetViewport(400, 300);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during Display Set Viewport Issue: " << error << "\n"; };
// SET UP TEST OBJ
MainChar *player = new MainChar();
player->MainChar_VBO_Func();
GLushort size = player->MainChar_VBO_IndexBuffer_Func();
float count = 0.0;
// END SET UP OF TEST OBJ
GLint *length = new GLint;
const char* vertShdr = readFile("C:\\Users\\JRFerrell\\Documents\\Visual Studio 2013\\Projects\\GLEW Practice\\vertShader.vs", *length);
std::cout << vertShdr;
GLuint vertShaderId = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertShaderId, 1, &vertShdr, length);
std::cout << "\n\nLength: " << *length;
glCompileShader(vertShaderId);
GLint *length2 = new GLint;
const char* fragShdr = readFile("C:\\Users\\JRFerrell\\Documents\\Visual Studio 2013\\Projects\\GLEW Practice\\fragShader.fs", *length2);
GLint fragShaderId = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragShaderId, 1, &fragShdr, length2);
glCompileShader(fragShaderId);
GLuint shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertShaderId);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glAttachShader: " << error << "\n"; };
glAttachShader(shaderProgram, fragShaderId);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glAttachShader: " << error << "\n"; };
glBindAttribLocation(shaderProgram, 0, "in_Position");
glBindAttribLocation(shaderProgram, 1, "in_Normal");
glLinkProgram(shaderProgram);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error during glLinkProgram: " << error << "\n"; };
// END SHADER PROGRAM DEFINITION
//Check info log for errors:
int Len = 0;
char *Buffer = nullptr;
glGetShaderiv(vertShaderId, GL_INFO_LOG_LENGTH, &Len);
Buffer = new char[Len];
glGetShaderInfoLog(vertShaderId, Len, &Len, Buffer);
std::cout << "Vertex Log:" << std::endl << Buffer << std::endl;
delete[] Buffer;
glGetShaderiv(fragShaderId, GL_INFO_LOG_LENGTH, &Len);
Buffer = new char[Len];
glGetShaderInfoLog(fragShaderId, Len, &Len, Buffer);
std::cout << "Fragment Log:" << std::endl << Buffer << std::endl;
delete[] Buffer;
glGetProgramiv(shaderProgram, GL_INFO_LOG_LENGTH, &Len);
Buffer = new char[Len];
glGetProgramInfoLog(shaderProgram, Len, &Len, Buffer);
std::cout << "Shader Log:" << std::endl << Buffer << std::endl;
delete[] Buffer;
// Create VAO. Don't forget to enable all necessary states because the VAO starts with default state, cleaning all states prev called to do so.
GLuint VaoId;
glGenVertexArrays(1, &VaoId);
glBindVertexArray(VaoId);
// Bind buffers & set-up VAO vertex pointers
glBindBuffer(GL_ARRAY_BUFFER, player->vboID);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glBindBuffer-vboID: " << error << "\n"; }
glEnableClientState(GL_VERTEX_ARRAY);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 6 * GL_FLOAT, (const GLvoid *)0);
glEnableVertexAttribArray(0);
// Set-up VAO normal pointers
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glBindBuffer-vbo init: " << error << "\n"; }
glEnableClientState(GL_NORMAL_ARRAY);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * GL_FLOAT, (void*)(3 * sizeof(GL_FLOAT)));
glEnableVertexAttribArray(1);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, player->vboIndexID);
GLint maxLength, nAttribs;
glGetProgramiv(shaderProgram, GL_ACTIVE_ATTRIBUTES, &nAttribs);
glGetProgramiv(shaderProgram, GL_ACTIVE_ATTRIBUTES, &maxLength);
//std::cout << "\nmax length: " << maxLength << "\nnAttribs: " << nAttribs;
glBindVertexArray(0);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glBindVertexArray: " << error << "\n"; };
// End VAO init
while (1){
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error glClearColor: " << error << "\n"; };
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
error = glGetError();
if (error != GL_NO_ERROR){ std::cout << "Error in glClear: " << error << "\n"; };
glLoadIdentity();
glUseProgram(shaderProgram);
glBindVertexArray(VaoId);
glDrawElements(GL_TRIANGLES, size, GL_UNSIGNED_SHORT, 0);
glUseProgram(0);
glBindVertexArray(0);
SDL_GL_SwapWindow(displayWindow);
count -= .1;
}
SDL_Delay(5000);
SDL_Quit();
return 0;
}
::The shader code::
Vertex shader:
#version 400
in vec3 in_Position;
in vec3 in_Normal;
void main()
{
gl_Position = vec4(in_Position, 1.0);
}
Fragment shader:
#version 400
out vec4 FragColor;
void main()
{
FragColor = vec4(0.0f, 0.5f, 1.0f, 1.0f);
}
I did look at similar questions on here already, and they did help me fix a few possible issues, but so far they obviously haven't proven useful in helping me get my code up and running. I also asked some other people in real-time chat on gamedev.net, but they couldn't seem to see where I went wrong either. I fixed a possible issue with declaring GLdoubles rather than floats, but that was actually working without the VAO and shaders, so that is not (and likely never was) the issue, in whole or in part.
I don't know if any of the following will solve your problem, but I do see some issues in your code:
glEnableClientState(GL_VERTEX_ARRAY);
Here you are mixing the old, deprecated built-in vertex attributes with the generic vertex attributes. You don't need any of these glEnableClientState calls - your shader doesn't use the built-in attributes. The same goes for glLoadIdentity, which is also totally unneeded and would be invalid in a core profile context.
The second thing I see is that you do not specify your attribute indices, so the GL is free to map them. You also don't query them, but just assume them to be 0 for in_Position and 1 for in_Normal - which is by no means guaranteed to be the case. Use the layout(location=) qualifiers when declaring your input attributes in your vertex shader to actually define the mapping, or use glBindAttribLocation.
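For example, with your #version 400 shaders the mapping can be pinned down in the shader source itself:
#version 400
layout(location = 0) in vec3 in_Position;
layout(location = 1) in vec3 in_Normal;
With that in place, the hard-coded 0 and 1 passed to glEnableVertexAttribArray/glVertexAttribPointer are guaranteed to refer to the intended attributes.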
Quickly looking over your code, I am struggling to find where you are sending the buffer data to the GPU. The usual sequence is:
1. Generate and bind a new buffer.
2. Initialise the buffer to take data.
3. Send data using glBufferSubData...
4. Repeat steps 1 through 3 for element arrays.
5. Generate and bind a Vertex Array Object.
6. Set up VertexAttribArray pointers and bind them to your shader.
7. Bind the element buffer once again.
8. Unbind the vertex array using glBindVertexArray(0).
This is how I set up my buffers using OpenTK; the code should be fairly understandable and useful in any case:
// Generate Vertex Buffer Object and bind it so it is current.
GL.GenBuffers(1, out bufferHandle);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferHandle);
// Initialise storage space for the Vertex Buffer.
GL.BufferData(BufferTarget.ArrayBuffer, bufferSize, IntPtr.Zero, BufferUsageHint.StaticDraw);
// Send Position data.
GL.BufferSubData<Vector3>(
BufferTarget.ArrayBuffer, noOffset, new IntPtr(sizeOfPositionData), bufferObject.PositionData);
// Send Normals data, offset by size of Position data.
GL.BufferSubData<Vector3>(
BufferTarget.ArrayBuffer, new IntPtr(sizeOfPositionData), new IntPtr(sizeOfNormalsData), bufferObject.NormalsData);
// Generate Element Buffer Object and bind it so it is current.
GL.GenBuffers(1, out bufferHandle);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, bufferHandle);
GL.BufferData(
BufferTarget.ElementArrayBuffer, new IntPtr(sizeof(uint) * bufferObject.IndicesData.Length), bufferObject.IndicesData, BufferUsageHint.StaticDraw);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.VboID);
GL.BindBuffer(BufferTarget.ElementArrayBuffer, bufferObject.IboID);
// Generate Vertex Array Object and bind it so it is current.
GL.GenVertexArrays(1, out bufferHandle);
GL.BindVertexArray(bufferHandle);
bufferHandle = GL.GetAttribLocation(program, "in_position");
GL.EnableVertexAttribArray(bufferHandle);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.VboID);
GL.VertexAttribPointer(0, 3, VertexAttribPointerType.Float, true, Vector3.SizeInBytes, 0);
GL.BindAttribLocation(program, bufferHandle, "in_position");
bufferHandle = GL.GetAttribLocation(program, "in_normal");
GL.EnableVertexAttribArray(bufferHandle);
GL.BindBuffer(BufferTarget.ArrayBuffer, bufferObject.VboID);
GL.VertexAttribPointer(1, 3, VertexAttribPointerType.Float, true, Vector3.SizeInBytes, sizeOfPositionData);
GL.BindAttribLocation(program, bufferHandle, "in_normal");
GL.BindBuffer(BufferTarget.ElementArrayBuffer, bufferObject.IboID);
// IMPORTANT: vertex array needs unbinding here to avoid rendering incorrectly
GL.BindVertexArray(0);
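For readers following along in C++, a rough equivalent of the same sequence in raw OpenGL calls might look like this (the data pointers and byte sizes - positions, normals, indices, positionBytes, normalBytes, indexBytes - are assumed to exist and are purely illustrative):
GLuint vbo, ibo, vao;
// Steps 1-3: create the vertex buffer, allocate storage, then upload with glBufferSubData.
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, positionBytes + normalBytes, nullptr, GL_STATIC_DRAW);
glBufferSubData(GL_ARRAY_BUFFER, 0, positionBytes, positions);
glBufferSubData(GL_ARRAY_BUFFER, positionBytes, normalBytes, normals);
// Step 4: same for the element (index) buffer.
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indexBytes, indices, GL_STATIC_DRAW);
// Steps 5-7: create the VAO, record the attribute pointers and the element buffer binding.
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (const void*)0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (const void*)positionBytes); // normals start right after the positions
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
// Step 8: unbind the VAO so later calls don't modify it by accident.
glBindVertexArray(0);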
Well, after sitting down and reading the docs for version 4.0, I learned that I had screwed up on my attrib pointers by passing incorrect stride and pointers to the start of the buffer data. My thought was that the stride was the size of the element type multiplied by the number of attribute elements, so you'd get the next attribute you were looking for. Obviously that is not what you are supposed to do. I changed that to zero since my attribs are back to back:
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 6 * GL_FLOAT, (void*)(3 * sizeof(GL_FLOAT)));"
-->
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)(3 * sizeof(GL_FLOAT)));"
Then the pointer I tried handling almost the same exact way. Should have been a null pointer to the first buffer attrib location:
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (void*)(3 * sizeof(GL_FLOAT)));"
-->
"glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, (GLubyte *)NULL);"
Once I actually sat down and read the docs closely, I understood what actually belonged there. Now the shaders are working and I can work on the cool stuff... :P Thanks for the efforts to answer my question anyways everyone. :)