Meaning of "index" parameter in glEnableVertexAttribArray and (possibly) a bug in the OS X OpenGL implementation - c++

1) Do I understand correctly that to draw using vertex arrays or VBOs I need, for each of my attributes, to either call glBindAttribLocation before linking the shader program or call glGetAttribLocation after the program has been linked successfully, and then use the bound/obtained index in the glVertexAttribPointer and glEnableVertexAttribArray calls?
To be more specific: glBindAttribLocation, glVertexAttribPointer and glEnableVertexAttribArray all have an input parameter named "index". Is it the same "index" for all three? And is it the same thing as the value returned by glGetAttribLocation?
If yes:
2) I've been facing a problem on OS X, I described it here: https://stackoverflow.com/questions/28093919/using-default-attribute-location-doesnt-work-on-osx-osx-opengl-bug , but unfortunately didn't get any replies.
The problem is that depending on which attribute locations I bind my attributes to, I do or do not see anything on the screen. I only see this behavior on my MacBook Pro with OS X 10.9.5; I've tried running the same code on Linux and Windows, and it seems to work on those platforms independently of which locations my attributes are bound to.
Here is a code example (which is supposed to draw a red triangle on the screen) that exhibits the problem:
#include <iostream>
#include <GLFW/glfw3.h>
GLuint global_program_object;
GLint global_position_location;
GLint global_aspect_ratio_location;
GLuint global_buffer_names[1];
int LoadShader(GLenum type, const char *shader_source)
{
GLuint shader;
GLint compiled;
shader = glCreateShader(type);
if (shader == 0)
return 0;
glShaderSource(shader, 1, &shader_source, NULL);
glCompileShader(shader);
glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
if (!compiled)
{
GLint info_len = 0;
glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &info_len);
if (info_len > 1)
{
char* info_log = new char[info_len];
glGetShaderInfoLog(shader, info_len, NULL, info_log);
std::cout << "Error compiling shader" << info_log << std::endl;
delete info_log;
}
glDeleteShader(shader);
return 0;
}
return shader;
}
int InitGL()
{
char vertex_shader_source[] =
"attribute vec4 att_position; \n"
"attribute float dummy;\n"
"uniform float uni_aspect_ratio; \n"
"void main() \n"
" { \n"
" vec4 test = att_position * dummy;\n"
" mat4 mat_projection = \n"
" mat4(1.0 / uni_aspect_ratio, 0.0, 0.0, 0.0, \n"
" 0.0, 1.0, 0.0, 0.0, \n"
" 0.0, 0.0, -1.0, 0.0, \n"
" 0.0, 0.0, 0.0, 1.0); \n"
" gl_Position = att_position; \n"
" gl_Position *= mat_projection; \n"
" } \n";
char fragment_shader_source[] =
"void main() \n"
" { \n"
" gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); \n"
" } \n";
GLuint vertex_shader;
GLuint fragment_shader;
GLuint program_object;
GLint linked;
vertex_shader = LoadShader(GL_VERTEX_SHADER , vertex_shader_source );
fragment_shader = LoadShader(GL_FRAGMENT_SHADER, fragment_shader_source);
program_object = glCreateProgram();
if(program_object == 0)
return 1;
glAttachShader(program_object, vertex_shader );
glAttachShader(program_object, fragment_shader);
// Here any index except 0 results in observing the black screen
glBindAttribLocation(program_object, 1, "att_position");
glLinkProgram(program_object);
glGetProgramiv(program_object, GL_LINK_STATUS, &linked);
if(!linked)
{
GLint info_len = 0;
glGetProgramiv(program_object, GL_INFO_LOG_LENGTH, &info_len);
if(info_len > 1)
{
char* info_log = new char[info_len];
glGetProgramInfoLog(program_object, info_len, NULL, info_log);
std::cout << "Error linking program" << info_log << std::endl;
delete info_log;
}
glDeleteProgram(program_object);
return 1;
}
global_program_object = program_object;
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glUseProgram(global_program_object);
global_position_location = glGetAttribLocation (global_program_object, "att_position");
global_aspect_ratio_location = glGetUniformLocation(global_program_object, "uni_aspect_ratio");
GLfloat vertices[] = {-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f};
glGenBuffers(1, global_buffer_names);
glBindBuffer(GL_ARRAY_BUFFER, global_buffer_names[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 9, vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
return 0;
}
void Render()
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glUseProgram(global_program_object);
glBindBuffer(GL_ARRAY_BUFFER, global_buffer_names[0]);
glVertexAttribPointer(global_position_location, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(global_position_location);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(global_position_location);
glUseProgram(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
}
void FreeGL()
{
glDeleteBuffers(1, global_buffer_names);
glDeleteProgram(global_program_object);
}
void SetViewport(int width, int height)
{
glViewport(0, 0, width, height);
glUseProgram(global_program_object);
glUniform1f(global_aspect_ratio_location, static_cast<GLfloat>(width) / static_cast<GLfloat>(height));
}
int main(void)
{
GLFWwindow* window;
if (!glfwInit())
return -1;
window = glfwCreateWindow(640, 480, "Hello World", NULL, NULL);
if (!window)
{
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
InitGL();
// Double the resolution to correctly draw with Retina display
SetViewport(1280, 960);
while (!glfwWindowShouldClose(window))
{
Render();
glfwSwapBuffers(window);
glfwPollEvents();
}
FreeGL();
glfwTerminate();
return 0;
}
Does this look like a bug to you? Can anyone reproduce it? If it's a bug where should I report it?
P.S.
I've also tried SDL instead of GLFW, the behavior is the same...

The behavior you see is actually correct as per the spec, and MacOSX has something to do with this, but only in a very indirect way.
To answer question 1) first: you are basically correct. With modern GLSL (>= 3.30), you can also specify the desired index via the layout(location=...) qualifier directly in the shader code instead of using glBindAttribLocation(), but that is only a side note.
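For illustration, a minimal sketch of that qualifier (this assumes a GLSL 3.30 / core-profile context, which the code in the question does not create):
#version 330
layout(location = 1) in vec4 att_position; // same effect as calling glBindAttribLocation(prog, 1, "att_position") before linking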
The problem you are facing is that you are using a legacy GL context. You do not request a specific version, so you get a context with maximum backwards compatibility. On Windows, you are very likely to get a compatibility profile of the highest version supported by the implementation (typically GL 3.x or 4.x on non-ancient GPUs).
However, on OS X a legacy context is limited to at most GL 2.1, and this is where the crux lies: your code is invalid in GL 2.x. To explain this, I have to go back in GL history. In the beginning, there was immediate mode, so you drew like this:
glBegin(primType);
glColor3f(r,g,b);
glVertex3f(x,y,z);
[...]
glColor3f(r,g,b);
glVertex3f(x,y,z);
glEnd();
Note that the glVertex call is what actually creates a vertex. All other per-vertex attributes are basically some current vertex state which can be set any time, but calling glVertex will take all of those current attributes together with the position to form the vertex which is fed to the pipeline.
Now when vertex arrays were added, we got functions like glVertexPointer(), glColorPointer() and so on, and each attribute array could be enabled or disabled separately via glEnableClientState(). In the OpenGL 2.1 specification, the array-based draw calls are actually defined in terms of immediate mode: glDrawArrays(GLenum mode, GLint first, GLsizei count) is equivalent to
glBegin(mode);
for (i=0; i<count; i++)
    ArrayElement(first + i);
glEnd();
with ArrayElement(i) being defined as follows (this one is derived from the wording of the GL 1.5 spec):
if ( normal_array_enabled )
    Normal3...( <i-th normal value> );
[...] // similar for all other builtin attribs
if ( vertex_array_enabled )
    Vertex...( <i-th vertex value> );
This definition has a subtle consequence: you must have the GL_VERTEX_ARRAY attribute array enabled, otherwise nothing will be drawn, since no equivalent of the glVertex calls is generated.
Now when the generic attributes were added in GL2.0, a special guarantee was made: generic attribute 0 is aliasing the builtin glVertex attribute - so both can be used interchangeably, in immediate mode as well as in arrays. So glVertexAttrib3f(0,x,y,z) "creates" a vertex the same way glVertex3f(x,y,z) would have. And using an array with glEnableVertexAttribArray(0) is as good as glEnableClientState(GL_VERTEX_ARRAY).
In GL 2.1, the ArrayElement(i) function now looks as follows:
if ( normal_array_enabled )
    Normal3...( <i-th normal value> );
[...] // similar for all other builtin attribs
for (a=1; a<max_attribs; a++) {
    if ( generic_attrib_a_enabled )
        glVertexAttrib...(a, <i-th value of attrib a> );
}
if ( generic_attrib_0_enabled )
    glVertexAttrib...(0, <i-th value of attrib 0> );
else if ( vertex_array_enabled )
    Vertex...( <i-th vertex value> );
Now this is what happens to you. You absolutely need attribute 0 (or the old GL_VERTEX_ARRAY attribute) to be enabled for this to generate any vertices for the pipeline.
Note that it should be possible in theory to just enable attribute 0, no matter whether it is used in the shader or not. You should just make sure that the corresponding attrib pointer points to valid memory, to be 100% safe. So you could simply check whether your attribute index 0 is used, and if not, set the same pointer for attribute 0 as you did for your real attribute, and the GL should be happy. But I haven't tried this.
In more modern GL, these requirements are not there anymore, and drawing without attribute 0 will work as intended, and that is what you saw on those other systems. Maybe you should consider switching to modern GL, say >= 3.2 core profile, where the issue will not be present (but you need to update your code a bit, including the shaders).
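Until then, the simplest workaround under a 2.1 context, based on the code in the question, is to make sure generic attribute 0 is the one that actually feeds the vertex position (a minimal sketch; only the bound location changes):
glBindAttribLocation(program_object, 0, "att_position"); // before glLinkProgram
Alternatively, as noted above (untried), keep your attribute wherever it is but also enable array 0 and point it at valid memory before drawing:
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); // with the vertex VBO still bound
glEnableVertexAttribArray(0);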

Related

Memory barrier problems for writing and reading an image OpenGL

I'm having a problem trying to read an image from a fragment shader. First I write into the image in shader program A (I'm just painting the image blue), then I read from it in another shader program B to display it, but the read does not return the right color: I get a black image instead (an unexpected result).
This is my application code:
void GLAPIENTRY MessageCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, const void* userParam)
{
std::cout << "GL CALLBACK: type = " << std::hex << type << ", severity = " << std::hex << severity << ", message = " << message << "\n"
<< (type == GL_DEBUG_TYPE_ERROR ? "** GL ERROR **" : "") << std::endl;
}
class ImgRW
: public Core
{
public:
ImgRW()
: Core(512, 512, "JFAD")
{}
virtual void Start() override
{
glEnable(GL_DEBUG_OUTPUT);
glDebugMessageCallback(MessageCallback, nullptr);
shader_w = new Shader("w_img.vert", "w_img.frag");
shader_r = new Shader("r_img.vert", "r_img.frag");
glGenTextures(1, &space);
glBindTexture(GL_TEXTURE_2D, space);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, 512, 512);
glBindImageTexture(0, space, 0, GL_FALSE, 0, GL_READ_WRITE, GL_RGBA32F);
glGenVertexArrays(1, &vertex_array);
glBindVertexArray(vertex_array);
}
virtual void Update() override
{
shader_w->use(); // writing shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glMemoryBarrier(GL_TEXTURE_FETCH_BARRIER_BIT | GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);
shader_r->use(); // reading shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}
virtual void End() override
{
delete shader_w;
delete shader_r;
glDeleteTextures(1, &space);
glDeleteVertexArrays(1, &vertex_array);
}
private:
Shader* shader_w;
Shader* shader_r;
GLuint vertex_array;
GLuint space;
};
#if 1
CORE_MAIN(ImgRW)
#endif
and these are my fragment shaders:
Writing to image
Code glsl:
#version 430 core
layout (binding = 0, rgba32f) uniform image2D img;
out vec4 out_color;
void main()
{
imageStore(img, ivec2(gl_FragCoord.xy), vec4(0.0f, 0.0f, 1.0f, 1.0f));
}
Reading from image
Code glsl:
#version 430 core
layout (binding = 0, rgba32f) uniform image2D img;
out vec4 out_color;
void main()
{
vec4 color = imageLoad(img, ivec2(gl_FragCoord.xy));
out_color = color;
}
The only way I get the correct result is if I change the order of the drawing commands, in which case I don't need the memory barriers at all, like this (in the Update function from above):
shader_r->use(); // reading shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
shader_w->use(); // writing shader
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
I don't know whether the problem is the graphics card or the drivers, whether I'm missing some kind of flag that enables memory barriers, whether I picked the wrong barrier bits, or whether I placed the barriers in the wrong part of the code.
The vertex shader for both shader programs is the following:
#version 430 core
void main()
{
vec2 v[4] = vec2[4]
(
vec2(-1.0, -1.0),
vec2( 1.0, -1.0),
vec2(-1.0, 1.0),
vec2( 1.0, 1.0)
);
vec4 p = vec4(v[gl_VertexID], 0.0, 1.0);
gl_Position = p;
}
and this is my init function:
void Window::init()
{
glfwInit();
window = glfwCreateWindow(getWidth(), getHeight(), name, nullptr, nullptr);
glfwMakeContextCurrent(window);
glfwSetFramebufferSizeCallback(window, framebufferSizeCallback);
glfwSetCursorPosCallback(window, cursorPosCallback);
//glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
assert(gladLoadGLLoader((GLADloadproc)glfwGetProcAddress) && "Couldn't initilaize OpenGL");
glEnable(GL_DEPTH_TEST);
}
and in my Run function I call my Start, Update and End functions:
void Core::Run()
{
std::cout << glGetString(GL_VERSION) << std::endl;
Start();
float lastFrame{ 0.0f };
while (!window.close())
{
float currentFrame = static_cast<float>(glfwGetTime());
Time::deltaTime = currentFrame - lastFrame;
lastFrame = currentFrame;
glViewport(0, 0, getWidth(), getHeight());
glClearBufferfv(GL_COLOR, 0, &color[0]);
glClearBufferfi(GL_DEPTH_STENCIL, 0, 1.0f, 0);
Update();
glfwSwapBuffers(window);
glfwPollEvents();
}
End();
}
glEnable(GL_DEPTH_TEST);
As I suspected.
Just because a fragment shader doesn't write a color output doesn't mean that those fragments will not affect the depth buffer. If the fragment passes the depth test and the depth write mask is on (assuming no other state is involved), it will update the depth buffer with the current fragment's depth (and the color buffer with uninitialized values, but that's a different matter).
Since you're drawing the same geometry both times, the second rendering's fragments will get the same depth values as the corresponding fragments from the first rendering. But the default depth function is GL_LESS. Since any value is not less than itself, this means that all fragments from the second rendering fail the depth test.
And therefore, they don't get rendered.
So just turn off the depth test. And while you're at it, turn off color writes for your "writing" rendering pass, since you're not writing to the color buffers.
Now, you do properly need the memory barrier between the two draw calls. But you only need the GL_SHADER_IMAGE_ACCESS_BARRIER_BIT, since that's how you're reading the data (via image load/store, not samplers).
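Putting those suggestions together, the Update() member function from the question might look roughly like this (a sketch, untested against your framework):
virtual void Update() override
{
    glDisable(GL_DEPTH_TEST);                              // both passes rasterize the same quad at the same depth
    glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);   // the writing pass only does imageStore
    shader_w->use();                                       // writing shader
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
    glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT);   // the read happens via imageLoad
    shader_r->use();                                       // reading shader
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
}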

Getting OpenGL ES Transform Feedback data back to the host using Emscripten

Using GLFW, the C++ OpenGL ES code below calculates the square roots of five numbers and outputs them on the command line. The code makes use of Transform Feedback. When I compile on Ubuntu 17.10, using the following command, I get the result I expect:
$ g++ tf.cpp -lGL -lglfw
If I use Emscripten, however, an exception is thrown, which indicates that
glMapBufferRange is only supported when access is MAP_WRITE|INVALIDATE_BUFFER. I do want to read rather than write, so perhaps I shouldn't use glMapBufferRange, but what can I use instead? I have tried on both Firefox and Chromium. The command I use to compile with Emscripten is:
$ em++ -std=c++11 tf.cpp -s USE_GLFW=3 -s USE_WEBGL2=1 -s FULL_ES3=1 -o a.out.html
The code follows:
#include <iostream>
#define GLFW_INCLUDE_ES3
#include <GLFW/glfw3.h>
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
static const GLuint WIDTH = 800;
static const GLuint HEIGHT = 600;
static const GLchar *vertex_shader_src =
"#version 300 es\n"
"precision mediump float;\n"
"in float inValue;\n"
"out float outValue;\n"
"void main() {\n"
" outValue = sqrt(inValue);\n"
"}\n";
// Emscripten complains if there's no fragment shader
static const GLchar *fragment_shader_src =
"#version 300 es\n"
"precision mediump float;\n"
"out vec4 colour;\n"
"void main() {\n"
" colour = vec4(1.0, 1.0, 0.0, 1.0);\n"
"}\n";
static const GLfloat vertices[] = {
0.0f, 0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, -0.5f, 0.0f,
};
GLint get_shader_prog(const char *vert_src, const char *frag_src = "")
{
enum Consts { INFOLOG_LEN = 512 };
GLchar infoLog[INFOLOG_LEN];
GLint fragment_shader, vertex_shader, shader_program, success;
shader_program = glCreateProgram();
auto mk_shader = [&](GLint &shader, const GLchar **str, GLenum shader_type) {
shader = glCreateShader(shader_type);
glShaderSource(shader, 1, str, NULL);
glCompileShader(shader);
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
if (!success) {
glGetShaderInfoLog(shader, INFOLOG_LEN, NULL, infoLog);
std::cout << "ERROR::SHADER::COMPILATION_FAILED\n" << infoLog << '\n';
}
glAttachShader(shader_program, shader);
};
mk_shader(vertex_shader, &vert_src, GL_VERTEX_SHADER);
mk_shader(fragment_shader, &frag_src, GL_FRAGMENT_SHADER);
const GLchar* feedbackVaryings[] = { "outValue" };
glTransformFeedbackVaryings(shader_program, 1, feedbackVaryings,
GL_INTERLEAVED_ATTRIBS);
glLinkProgram(shader_program);
glGetProgramiv(shader_program, GL_LINK_STATUS, &success);
if (!success) {
glGetProgramInfoLog(shader_program, INFOLOG_LEN, NULL, infoLog);
std::cout << "ERROR::SHADER::PROGRAM::LINKING_FAILED\n" << infoLog << '\n';
}
glUseProgram(shader_program);
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
return shader_program;
}
int main(int argc, char *argv[])
{
glfwInit();
glfwWindowHint(GLFW_CLIENT_API, GLFW_OPENGL_ES_API);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
GLFWwindow *window = glfwCreateWindow(WIDTH, HEIGHT, __FILE__, NULL, NULL);
glfwMakeContextCurrent(window);
GLuint shader_prog = get_shader_prog(vertex_shader_src, fragment_shader_src);
GLint inputAttrib = glGetAttribLocation(shader_prog, "inValue");
glViewport(0, 0, WIDTH, HEIGHT);
GLuint vao;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLfloat data[] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f };
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(data), data, GL_STATIC_DRAW);
glVertexAttribPointer(inputAttrib, 1, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(inputAttrib);
GLuint tbo;
glGenBuffers(1, &tbo);
glBindBuffer(GL_TRANSFORM_FEEDBACK_BUFFER, tbo);
glBufferData(GL_TRANSFORM_FEEDBACK_BUFFER, sizeof(data),
nullptr, GL_STATIC_READ);
glBindBufferBase(GL_TRANSFORM_FEEDBACK_BUFFER, 0, tbo);
glEnable(GL_RASTERIZER_DISCARD);
glBeginTransformFeedback(GL_POINTS);
glDrawArrays(GL_POINTS, 0, 5);
glEndTransformFeedback();
glDisable(GL_RASTERIZER_DISCARD);
glFlush();
GLfloat feedback[5]{1,2,3,4,5};
void *void_buf = glMapBufferRange(GL_TRANSFORM_FEEDBACK_BUFFER,0,
sizeof(feedback), GL_MAP_READ_BIT);
GLfloat *buf = static_cast<GLfloat *>(void_buf);
for (int i = 0; i < 5; i++)
feedback[i] = buf[i];
glUnmapBuffer(GL_TRANSFORM_FEEDBACK_BUFFER);
for (int i = 0; i < 5; i++)
std::cout << feedback[i] << ' ';
std::cout << std::endl;
glDeleteBuffers(1, &vbo);
glDeleteBuffers(1, &tbo);
glDeleteVertexArrays(1, &vao);
glfwTerminate();
return 0;
}
As has been mentioned by another answer, WebGL 2.0 is close to OpenGL ES 3.0, but confusingly it does not define the glMapBufferRange() function, so Emscripten only emulates part of that function's behavior.
Real WebGL 2.0, however, exposes a glGetBufferSubData() analog, which does not exist in OpenGL ES but does exist in desktop OpenGL. It can be wrapped via EM_ASM_:
void myGetBufferSubData (GLenum theTarget, GLintptr theOffset, GLsizeiptr theSize, void* theData)
{
#ifdef __EMSCRIPTEN__
EM_ASM_(
{
Module.ctx.getBufferSubData($0, $1, HEAPU8.subarray($2, $2 + $3));
}, theTarget, theOffset, theData, theSize);
#else
glGetBufferSubData (theTarget, theOffset, theSize, theData);
#endif
}
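With that wrapper in place, the read-back in the question's main() could be replaced like this (a sketch; it simply stands in for the glMapBufferRange/glUnmapBuffer pair):
GLfloat feedback[5];
myGetBufferSubData(GL_TRANSFORM_FEEDBACK_BUFFER, 0, sizeof(feedback), feedback);
for (int i = 0; i < 5; i++)
    std::cout << feedback[i] << ' ';
std::cout << std::endl;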
Fetching VBO data back is troublesome in multi-platform code:
OpenGL 1.5+   : glGetBufferSubData() YES, glMapBufferRange() YES
OpenGL ES 2.0 : glGetBufferSubData() NO,  glMapBufferRange() NO
OpenGL ES 3.0+: glGetBufferSubData() NO,  glMapBufferRange() YES
WebGL 1.0     : glGetBufferSubData() NO,  glMapBufferRange() NO
WebGL 2.0     : glGetBufferSubData() YES, glMapBufferRange() NO
In summary:
Desktop OpenGL gives maximum flexibility.
OpenGL ES 2.0 and WebGL 1.0 give no way to retrieve the data back.
OpenGL ES 3.0+ offers only buffer mapping.
WebGL 2.0 offers only getBufferSubData().
WebGL2 does not support MapBufferRange because it would be a security nightmare. Instead it supports getBufferSubData. I have no idea if that is exposed to WebAssembly in emscripten. Emscripten is emulating MapBufferRange for the cases you mentioned by using bufferData. See here and here
If getBufferSubData is not supported you can add it. See the code in library_gl.js for how readPixels is implemented and use that as inspiration for how to expose getBufferSubData to WebAssembly. Either that, or add some inline JavaScript with EM_ASM. I haven't done it, but googling for "getBufferSubData emscripten" brought up this gist.

Minimal C++ OpenGL Segmentation Fault

I am following the first instalment in Tom Dalling's Modern OpenGL tutorial and when I downloaded his code it worked perfectly. So then I wanted to create my own minimal version to help me understand how it all worked.
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <GLM/glm.hpp>
#include <stdexcept>
GLFWwindow* gWindow = NULL;
GLint gProgram = 0;
GLuint gVertexShader = 0;
GLuint gFragmentShader = 0;
GLuint gVAO = 0;
GLuint gVBO = 0;
int main() {
/** Initialise GLFW **/
if (!glfwInit()){
throw std::runtime_error("GLFW not initialised.");
}
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_COMPAT_PROFILE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
gWindow = glfwCreateWindow(800, 600, "OpenGL 3.2", NULL, NULL);
if(!gWindow) {
throw std::runtime_error("glfwCreateWindow failed.");
}
glfwMakeContextCurrent(gWindow);
/** Initialise GLEW **/
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK) {
throw std::runtime_error("Glew not initialised.");
}
if (!GLEW_VERSION_3_2) {
throw std::runtime_error("OpenGL 3.2 not supported.");
}
/** Set up shaders **/
gVertexShader = glCreateShader(GL_VERTEX_SHADER); //Segmentation fault here
gFragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(gVertexShader, 1, (const GLchar* const*)R"(
#version 150
in vec3 vertexPos
out vec colour;
void main() {
colour = vertexPos;
gl_Position = vec4(vertexPos, 1);
}
)", NULL);
glShaderSource(gFragmentShader, 1, (const GLchar* const*)R"(
#version 150
in vec3 colour;
out vec4 fragColour;
void main() {
fragColour = colour;
}
)", NULL);
glCompileShader(gVertexShader);
glCompileShader(gFragmentShader);
/** Set up program **/
gProgram = glCreateProgram();
glAttachShader(gProgram, gVertexShader);
glAttachShader(gProgram, gFragmentShader);
glLinkProgram(gProgram);
glDetachShader(gProgram, gVertexShader);
glDetachShader(gProgram, gFragmentShader);
glDeleteShader(gVertexShader);
glDeleteShader(gFragmentShader);
/** Set up VAO and VBO **/
float vertexData[] = {
// X, Y, Z
0.2f, 0.2f, 0.0f,
0.8f, 0.2f, 0.0f,
0.5f, 0.8f, 0.0f,
};
glGenVertexArrays(1, &gVAO);
glBindVertexArray(gVAO);
glGenBuffers(1, &gVBO);
glBindBuffer(GL_ARRAY_BUFFER, gVBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(GL_FLOAT)*9, vertexData, GL_STATIC_DRAW);
glVertexAttribPointer(glGetAttribLocation(gProgram, "vertexPos"), 3, GL_FLOAT, GL_FALSE, 0, NULL);
/** Main Loop **/
while (!glfwWindowShouldClose(gWindow)) {
glfwPollEvents();
glBindBuffer(GL_ARRAY_BUFFER, gVBO);
glDrawArrays(GL_TRIANGLES, 0, 1);
glfwSwapBuffers(gWindow);
}
return 0;
}
This is what I created and it builds correctly. However when running it I get a segmentation fault upon calling glCreateShader.
I have done a lot of searching to find a solution to this and so far I've found two main reasons for this happening:
One is that glewExperimental needs to be set to GL_TRUE, however nothing changed after adding this line.
The second reason was that the OpenGL context was not being set, but I believe the call to glfwMakeContextCurrent covers this point.
From here I have no clue as of what to do, so any tips would be a great help!
EDIT: Thank you all for your help! I eventually managed to get it working and have uploaded the code to pastebin for future reference.
Problem solution
Your program crashes not at the line you pointed to, but on the first glShaderSource call. You actually pass a pointer to a string as the third argument, casting it to (const GLchar* const*). That's not what glShaderSource expects. It expects a pointer to an array of pointers to strings, and the length of that array of pointers should be passed as the second argument (see the documentation).
As a quick-n-dirty fix for your code, I declared the following:
const char *vs_source[] = {
"#version 150\n",
"in vec3 vertexPos\n",
"out vec colour;\n",
"void main() {\n",
" colour = vertexPos;\n",
" gl_Position = vec4(vertexPos, 1);\n",
"}\n"};
const char *fs_source[] = {
"#version 150\n",
"in vec3 colour;\n",
"out vec4 fragColour;\n",
"void main() {\n",
" fragColour = colour;\n",
"}\n"};
and replaced the calls to glShaderSource to
glShaderSource(gVertexShader, 7, (const char**)vs_source, NULL);
glShaderSource(gFragmentShader, 6, (const char**)fs_source, NULL);
Now the program works fine and shows a window as expected.
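An alternative sketch of the same fix that keeps a raw string literal: pass the address of a single pointer, which is the pointer-to-pointer glShaderSource expects (this version also repairs the missing semicolon and the vec/vec3 typo in the original vertex shader source, which would otherwise fail to compile):
const GLchar* vs_src = R"(
    #version 150
    in vec3 vertexPos;
    out vec3 colour;
    void main() {
        colour = vertexPos;
        gl_Position = vec4(vertexPos, 1);
    }
)";
glShaderSource(gVertexShader, 1, &vs_src, NULL); // one string, so count is 1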
Hints
Your code did not even compile on my machine (gcc 4.8.3 on Linux) because of the cast of a string literal to (const GLchar* const*). So set more restrictive flags in your compiler options and pay attention to compiler warnings; moreover, treat warnings as errors.
Use a memory-checking tool to determine the exact location of crashes. For example, I used Valgrind to localize the problem.

GLSL getting odd values back from my uniform and it seems to be set with the wrong value too

I'm having problems using a uniform in a vertex shader
Here's the code:
// gcc main.c -o main `pkg-config --libs --cflags glfw3` -lGL -lm
#include <GLFW/glfw3.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
void gluErrorString(const char* why,GLenum errorCode);
void checkShader(GLuint status, GLuint shader, const char* which);
float verts[] = {
-0.5f, 0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
-0.5f, -0.5f, 0.0f
};
const char* vertex_shader =
"#version 330\n"
"in vec3 vp;\n"
"uniform float u_time;\n"
"\n"
"void main () {\n"
" vec4 p = vec4(vp, 1.0);\n"
" p.x = p.x + u_time;\n"
" gl_Position = p;\n"
"}";
const char* fragment_shader =
"#version 330\n"
"out vec4 frag_colour;\n"
"void main () {\n"
" frag_colour = vec4 (0.5, 0.0, 0.5, 1.0);\n"
"}";
int main () {
if (!glfwInit ()) {
fprintf (stderr, "ERROR: could not start GLFW3\n");
return 1;
}
glfwWindowHint (GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint (GLFW_CONTEXT_VERSION_MINOR, 2);
//glfwWindowHint (GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint (GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow* window = glfwCreateWindow (640, 480, "Hello Triangle", NULL, NULL);
if (!window) {
fprintf (stderr, "ERROR: could not open window with GLFW3\n");
glfwTerminate();
return 1;
}
glfwMakeContextCurrent (window);
// vert arrays group vert buffers together unlike GLES2 (no vert arrays)
// we *must* have one of these even if we only need 1 vert buffer
GLuint vao = 0;
glGenVertexArrays (1, &vao);
glBindVertexArray (vao);
GLuint vbo = 0;
glGenBuffers (1, &vbo);
glBindBuffer (GL_ARRAY_BUFFER, vbo);
// each vert takes 3 float * 4 verts in the fan = 12 floats
glBufferData (GL_ARRAY_BUFFER, 12 * sizeof (float), verts, GL_STATIC_DRAW);
gluErrorString("buffer data",glGetError());
glEnableVertexAttribArray (0);
glBindBuffer (GL_ARRAY_BUFFER, vbo);
// 3 components per vert
glVertexAttribPointer (0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
gluErrorString("attrib pointer",glGetError());
GLuint vs = glCreateShader (GL_VERTEX_SHADER);
glShaderSource (vs, 1, &vertex_shader, NULL);
glCompileShader (vs);
GLint success = 0;
glGetShaderiv(vs, GL_COMPILE_STATUS, &success);
checkShader(success, vs, "Vert Shader");
GLuint fs = glCreateShader (GL_FRAGMENT_SHADER);
glShaderSource (fs, 1, &fragment_shader, NULL);
glCompileShader (fs);
glGetShaderiv(fs, GL_COMPILE_STATUS, &success);
checkShader(success, fs, "Frag Shader");
GLuint shader_program = glCreateProgram ();
glAttachShader (shader_program, fs);
glAttachShader (shader_program, vs);
glLinkProgram (shader_program);
gluErrorString("Link prog",glGetError());
glUseProgram (shader_program);
gluErrorString("use prog",glGetError());
GLuint uniT = glGetUniformLocation(shader_program,"u_time"); // ask gl to assign uniform id
gluErrorString("get uniform location",glGetError());
printf("uniT=%i\n",uniT);
glEnable (GL_DEPTH_TEST);
glDepthFunc (GL_LESS);
float t=0;
while (!glfwWindowShouldClose (window)) {
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
gluErrorString("clear",glGetError());
glUseProgram (shader_program);
gluErrorString("use prog",glGetError());
t=t+0.01f;
glUniform1f( uniT, (GLfloat)sin(t));
gluErrorString("set uniform",glGetError());
float val;
glGetUniformfv(shader_program, uniT, &val);
gluErrorString("get uniform",glGetError());
printf("val=%f ",val);
glBindVertexArray (vao);
gluErrorString("bind array",glGetError());
glDrawArrays (GL_TRIANGLE_FAN, 0, 4);
gluErrorString("draw arrays",glGetError());
glfwPollEvents ();
glfwSwapBuffers (window);
gluErrorString("swap buffers",glGetError());
}
glfwTerminate();
return 0;
}
void checkShader(GLuint status, GLuint shader, const char* which) {
if (status==GL_TRUE) return;
int length;
char buffer[1024];
glGetShaderInfoLog(shader, sizeof(buffer), &length, buffer);
fprintf (stderr,"%s Error: %s\n", which,buffer);
glfwTerminate();
exit(-1);
}
struct token_string
{
GLuint Token;
const char *String;
};
static const struct token_string Errors[] = {
{ GL_NO_ERROR, "no error" },
{ GL_INVALID_ENUM, "invalid enumerant" },
{ GL_INVALID_VALUE, "invalid value" },
{ GL_INVALID_OPERATION, "invalid operation" },
{ GL_STACK_OVERFLOW, "stack overflow" },
{ GL_STACK_UNDERFLOW, "stack underflow" },
{ GL_OUT_OF_MEMORY, "out of memory" },
{ GL_TABLE_TOO_LARGE, "table too large" },
#ifdef GL_EXT_framebuffer_object
{ GL_INVALID_FRAMEBUFFER_OPERATION_EXT, "invalid framebuffer operation" },
#endif
{ ~0, NULL } /* end of list indicator */
};
void gluErrorString(const char* why,GLenum errorCode)
{
if (errorCode== GL_NO_ERROR) return;
int i;
for (i = 0; Errors[i].String; i++) {
if (Errors[i].Token == errorCode) {
fprintf (stderr,"error: %s - %s\n",why,Errors[i].String);
glfwTerminate();
exit(-1);
}
}
}
When the code runs, the quad flickers as if the uniform were getting junk values. Reading the uniform back also shows odd values like 36893488147419103232.000000 where it should just be a simple sine value.
The problem with your code is only indirectly related to GL at all - your GL code is OK.
However, you are using modern OpenGL functions without loading the function pointers as an extension. This might work on some platforms, but not on others. Mac OS X does guarantee that these functions are exported by the system's GL libs. On Windows, Microsoft's opengl32.dll never contains functions beyond GL 1.1, so your code wouldn't even link there. On Linux, you're somewhere in between: there is only the old Linux OpenGL ABI document, which guarantees that OpenGL 1.2 functions must be exported by the library. In practice, most GL implementations' libs on Linux export everything (but the fact that a function is there does not mean it is supported). Still, you should never link these functions directly, because nobody guarantees anything.
However, the story does not end here: you apparently did this on an implementation which does export the symbols, but you did not include the correct headers, and you have set up your compiler very poorly. In C it is valid (but poor style) to call a function which has not been declared before; the compiler will assume that it returns int and that all parameters are ints. In effect, you are calling these functions, but the compiler converts the arguments to int.
You would have noticed that if you had set up your compiler to produce some warnings, like -Wall on gcc:
a.c: In function ‘main’:
a.c:74: warning: implicit declaration of function ‘glGenVertexArrays’
a.c:75: warning: implicit declaration of function ‘glBindVertexArray’
[...]
However, the code compiles and links, and I can reproduce the results you described (I'm using Linux/NVIDIA here).
To fix this, you should use an OpenGL loader library. For example, I got your code working by using GLEW. All I had to do was add the following at the very top of the file:
#define GLEW_NO_GLU // because you re-implemented some glu-like functions with a different interface
#include <glew.h>
and calling
glewExperimental=GL_TRUE;
if (glewInit() != GLEW_OK) {
fprintf (stderr, "ERROR: failed to initialize GLEW\n");
glfwTerminate();
return 1;
}
glGetError(); // read away error generated by GLEW, it is broken in core profiles...
The GLEW headers include declarations for all the functions, so no implicit type conversions occur any more. GLEW might not be the best choice for core profiles, but I used it because it's the loader I'm most familiar with.
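For reference, a build line in that spirit might look like this (an assumption: the pkg-config module names can differ between distributions):
gcc main.c -o main -Wall `pkg-config --cflags --libs glfw3 glew` -lGL -lm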

OpenGL will not display anything

I'm trying to get a small triangle to display.
Here is my initialization code:
void PlayerInit(Player P1)
{
glewExperimental = GL_TRUE;
glewInit();
//initialize buffers needed to draw the player
GLuint vao;
GLuint buffer;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &buffer);
//bind the buffer and vertex objects
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
//Set up a buffer to hold 6 floats for position, and 9 floats for color
glBufferData(GL_ARRAY_BUFFER, sizeof(float)*18, NULL, GL_STATIC_DRAW);
//push the vertices of the player into the buffer
glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(float)*9, CalcPlayerPoints(P1.GetPosition()));
//push the color of each player vertex into the buffer
glBufferSubData(GL_ARRAY_BUFFER, sizeof(float)*9, sizeof(float)*9, CalcPlayerColor(1));
//create and compile the vertex/fragment shader objects
GLuint vs = create_shader("vshader.glsl" ,GL_VERTEX_SHADER);
GLuint fs = create_shader("fshader.glsl" ,GL_FRAGMENT_SHADER);
//create a program object and link the shaders
GLuint program = glCreateProgram();
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program);
//error checking for linking
GLint linked;
glGetProgramiv(program, GL_LINK_STATUS, &linked);
if(!linked)
{
std::cerr<< "Shader program failed to link" <<std::endl;
GLint logSize;
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &logSize);
char* logMsg = new char[logSize];
glGetProgramInfoLog(program, logSize, NULL, logMsg);
std::cerr<< logMsg << std::endl;
delete [] logMsg;
exit(EXIT_FAILURE);
}
glUseProgram(program);
//create attributes for color and position to pass to shaders
//enable each attribute
GLuint Pos = glGetAttribLocation( program, "vPosition");
glEnableVertexAttribArray(Pos);
GLuint Col = glGetAttribLocation( program, "vColor");
glEnableVertexAttribArray(Col);
//set a pointer at the proper offset into the buffer for each attribute
glVertexAttribPointer(Pos, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) (sizeof(float)*0));
glVertexAttribPointer(Col, 3, GL_FLOAT, GL_FALSE, 0, (const GLvoid*) (sizeof(float)*9));
}
I don't have much experience writing shader loading and linking code, so I think that is where the problem might be. I have some error checking in the shader loader, and nothing comes up, so I think that part is fine.
Next I have my display and main function:
//display function for the game
void GameDisplay( void )
{
//set the background color
glClearColor(1.0, 0.0, 1.0, 1.0);
//clear the screen
glClear(GL_COLOR_BUFFER_BIT);
//Draw the Player
glDrawArrays(GL_TRIANGLES, 0, 3);
glutSwapBuffers();
}
//main function
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE);
glutInitWindowSize(500, 500);
glutCreateWindow("Asteroids");
Player P1 = Player(0.0, 0.0);
PlayerInit(P1);
glutDisplayFunc(GameDisplay);
glutMainLoop();
return 0;
}
Vertex shader :
attribute vec3 vPosition;
attribute vec3 vColor;
varying vec4 color;
void main()
{
color = vec4(vColor, 1.0);
gl_Position = vec4(vPosition, 1.0);
}
Fragment shader
varying vec4 color;
void main()
{
gl_FragColor = color;
}
That's all of the relevant code. CalcPlayerPoints just returns a float array of size 9 to hold the triangle coordinates. CalcPlayerColor does something similar.
One last symptom that may help with diagnosing the problem: whenever I try to exit the program by closing the application window, I hit a breakpoint in glutMainLoop; however, if I close the console window, it exits fine.
Edit: I added the shaders for reference.
Edit: I am using OpenGL version 3.1
Without the shaders, we can't tell whether the faulty code is the GLSL (bad vertex transformations, etc.).
Have you tried checking glGetError to see whether the problem comes from your initialization code?
Maybe try setting the output of your fragment shader to, say, vec4(1.0, 0.0, 0.0, 1.0) to check whether its normal output is ill-formed.
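For example, a throwaway debug version of the question's fragment shader (a sketch) would be:
void main()
{
    gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); // hard-coded red; the colour varying is deliberately ignored
}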
Your last problem seems to point to undefined behavior, like a bad memory allocation/deallocation, which may take place in your Player class (by the way, consider passing the object by reference in your initialization code; at the moment it may trigger a shallow copy and then a double free of some pointer).
Turns out there was something wrong with how I was returning the array of vertices from the function used to compute them. The rest of the code worked fine after that fix.
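The resolution above doesn't show the offending function, but a common way this goes wrong is returning a pointer to a stack-local array. A hypothetical sketch of the bug and one possible fix (the function bodies below are assumptions, not the question's actual code):
// Broken: verts lives on the stack and is destroyed when the function returns,
// so the pointer handed to glBufferSubData dangles.
float* CalcPlayerPointsBroken()
{
    float verts[9] = { 0.2f, 0.2f, 0.0f,  0.8f, 0.2f, 0.0f,  0.5f, 0.8f, 0.0f };
    return verts; // undefined behaviour
}
// One fix: let the caller own the storage and fill it in.
void CalcPlayerPointsFixed(float out[9])
{
    const float verts[9] = { 0.2f, 0.2f, 0.0f,  0.8f, 0.2f, 0.0f,  0.5f, 0.8f, 0.0f };
    for (int i = 0; i < 9; ++i)
        out[i] = verts[i];
}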