Is the deepest mipmap level not an average of all the texels? - c++

I was trying to get an average of all the texels I've drawn in a texture attached to an FBO. The texture has RGBA32F format, so precision loss should be minimal in any case.
To actually compute the average I decided to use hardware mipmap generation, via the glGenerateMipmap command, and then read back the deepest mipmap level – the 1×1 one.
This works nicely when the texture has power-of-two dimensions. But when even one dimension is a single pixel smaller than that (and down to some other size), I get results very far from the true average.
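(For reference: by the spec's size rule, level i of a texture has dimensions max(1, floor(size/2^i)) in each dimension, so even a 512×511 texture has 1 + floor(log2(512)) = 10 levels and its deepest level is still 1×1:
512×511 → 256×255 → 128×127 → 64×63 → 32×31 → 16×15 → 8×7 → 4×3 → 2×1 → 1×1
The sanity check in the code below verifies this.)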
See e.g. the following test program:
#include <cmath>
#include <cassert>
#include <algorithm>
#include <vector>
#include <string>
#include <iostream>
// glad.h is generated by the following command:
// glad --out-path=. --generator=c --omit-khrplatform --api="gl=3.3" --profile=core --extensions=
#include "glad/glad.h"
#include <GL/freeglut.h>
#include <glm/glm.hpp>
using glm::vec4;
GLuint vao, vbo;
GLuint texFBO;
GLuint program;
GLuint fbo;
int width=512, height=512;
void getMeanPixelValue(int texW, int texH)
{
// Get average value of the rendered pixels as the value of the deepest mipmap level
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texFBO);
glGenerateMipmap(GL_TEXTURE_2D);
using namespace std;
// Formula from the glspec, "Mipmapping" subsection in section 3.8.11 Texture Minification
const auto totalMipmapLevels = 1+floor(log2(max(texW,texH)));
const auto deepestLevel=totalMipmapLevels-1;
// Sanity check
int deepestMipmapLevelWidth=-1, deepestMipmapLevelHeight=-1;
glGetTexLevelParameteriv(GL_TEXTURE_2D, deepestLevel, GL_TEXTURE_WIDTH, &deepestMipmapLevelWidth);
glGetTexLevelParameteriv(GL_TEXTURE_2D, deepestLevel, GL_TEXTURE_HEIGHT, &deepestMipmapLevelHeight);
assert(deepestMipmapLevelWidth==1);
assert(deepestMipmapLevelHeight==1);
vec4 pixel;
glGetTexImage(GL_TEXTURE_2D, deepestLevel, GL_RGBA, GL_FLOAT, &pixel[0]);
// Get average value in an actual summing loop over all the pixels
std::vector<vec4> data(texW*texH);
glGetTexImage(GL_TEXTURE_2D, 0, GL_RGBA, GL_FLOAT, data.data());
vec4 avg(0,0,0,0);
for(auto const& v : data)
avg+=v;
avg/=texW*texH;
std::cerr << "Mipmap value: " << pixel[0] << ", " << pixel[1] << ", " << pixel[2] << ", " << pixel[3] << "\n";
std::cerr << "True average: " << avg[0] << ", " << avg[1] << ", " << avg[2] << ", " << avg[3] << "\n";
}
GLuint makeShader(GLenum type, std::string const& srcStr)
{
const auto shader=glCreateShader(type);
const GLint srcLen=srcStr.size();
const GLchar*const src=srcStr.c_str();
glShaderSource(shader, 1, &src, &srcLen);
glCompileShader(shader);
GLint status=-1;
glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
assert(glGetError()==GL_NO_ERROR);
assert(status);
return shader;
}
void loadShaders()
{
program=glCreateProgram();
const auto vertexShader=makeShader(GL_VERTEX_SHADER, R"(
#version 330
in vec4 vertex;
void main() { gl_Position=vertex; }
)");
glAttachShader(program, vertexShader);
const auto fragmentShader=makeShader(GL_FRAGMENT_SHADER, R"(
#version 330
out vec4 color;
void main()
{
color.r = gl_FragCoord.y<100 ? 1 : 0;
color.g = gl_FragCoord.y<200 ? 1 : 0;
color.b = gl_FragCoord.y<300 ? 1 : 0;
color.a = gl_FragCoord.y<400 ? 1 : 0;
}
)");
glAttachShader(program, fragmentShader);
glLinkProgram(program);
GLint status=0;
glGetProgramiv(program, GL_LINK_STATUS, &status);
assert(glGetError()==GL_NO_ERROR);
assert(status);
glDetachShader(program, fragmentShader);
glDeleteShader(fragmentShader);
glDetachShader(program, vertexShader);
glDeleteShader(vertexShader);
}
void setupBuffers()
{
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
const GLfloat vertices[]=
{
-1, -1,
1, -1,
-1, 1,
1, 1,
};
glBufferData(GL_ARRAY_BUFFER, sizeof vertices, vertices, GL_STATIC_DRAW);
constexpr GLuint attribIndex=0;
constexpr int coordsPerVertex=2;
glVertexAttribPointer(attribIndex, coordsPerVertex, GL_FLOAT, false, 0, 0);
glEnableVertexAttribArray(attribIndex);
glBindVertexArray(0);
}
void setupRenderTarget()
{
glGenTextures(1, &texFBO);
glGenFramebuffers(1,&fbo);
glBindTexture(GL_TEXTURE_2D,texFBO);
glBindTexture(GL_TEXTURE_2D,0);
}
bool init()
{
if(!gladLoadGL())
{
std::cerr << "Failed to initialize GLAD\n";
return false;
}
if(!GLAD_GL_VERSION_3_3)
{
std::cerr << "OpenGL 3.3 not supported\n";
return false;
}
setupRenderTarget();
loadShaders();
setupBuffers();
return true;
}
bool inited=false;
void reshape(int width, int height)
{
::width=width;
::height=height;
std::cerr << "New size: " << width << "x" << height << "\n";
if(!inited)
{
if(!(inited=init()))
std::exit(1);
}
glViewport(0,0,width,height);
glBindTexture(GL_TEXTURE_2D,texFBO);
glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA32F,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,nullptr);
glBindTexture(GL_TEXTURE_2D,0);
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER,GL_COLOR_ATTACHMENT0,GL_TEXTURE_2D,texFBO,0);
const auto status=glCheckFramebufferStatus(GL_FRAMEBUFFER);
assert(status==GL_FRAMEBUFFER_COMPLETE);
glBindFramebuffer(GL_FRAMEBUFFER,0);
}
void display()
{
if(!inited)
{
if(!(inited=init()))
std::exit(1);
}
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glUseProgram(program);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glBindVertexArray(0);
getMeanPixelValue(width, height);
// Show the texture on screen
glBindFramebuffer(GL_READ_FRAMEBUFFER,fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER,0);
glBlitFramebuffer(0,0,width,height,0,0,width,height,GL_COLOR_BUFFER_BIT,GL_NEAREST);
glFinish();
}
int main(int argc, char** argv)
{
glutInitContextVersion(3,3);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGB);
glutInitWindowSize(width, height);
glutCreateWindow("Test");
glutDisplayFunc(display);
glutReshapeFunc(reshape);
glutMainLoop();
}
I get the following output when I resize the window vertically:
New size: 512x512
Mipmap value: 0.195312, 0.390625, 0.585938, 0.78125
True average: 0.195312, 0.390625, 0.585938, 0.78125
New size: 512x511
Mipmap value: 0, 0, 1, 1
True average: 0.195695, 0.391389, 0.587084, 0.782779
New size: 512x479
Mipmap value: 0, 0.00123596, 1, 1
True average: 0.208768, 0.417537, 0.626305, 0.835073
New size: 512x453
Mipmap value: 0, 0.125, 1, 1
True average: 0.220751, 0.441501, 0.662252, 0.883002
The mipmap values above are not just imprecise averages – except at the power-of-two size, they are not even close to the corresponding true averages!
This is on Kubuntu 18.04 with the following information from glxinfo:
Vendor: Intel Open Source Technology Center (0x8086)
Device: Mesa DRI Intel(R) Haswell Server (0x41a)
Version: 18.2.2
So, what's happening here? Is the deepest mipmap level not an average of all the texels in the texture? Or is it simply a bug in the OpenGL implementation?

Related

OpenGL debug context warning - "Will use VIDEO memory as the source for buffer object operations"

I'm jumping through the hoops right now to learn OpenGL and I've come across an issue. On my desktop computer with an NVIDIA GTX 780, OpenGL is printing out a warning via the glDebugMessageCallback mechanism:
"Buffer object 1 (bound to _GL_ARRAY_BUFFER_ARB, usage hint is GL_STATIC_DRAW) will use VIDEO memory as the source for buffer object operations."
I'm rendering one cube with a vertex buffer and an index buffer, so this message repeats for every buffer object I create (two of them). However, there is also one final warning at the end which states:
"Vertex shader in program 3 is being recompiled based on GL State."
I'm still able to render the cube I was rendering before, but the color I had set is now flashing between white and that color. I searched online and found this answer - https://community.khronos.org/t/nvidia-output-debug-error-131185/66033 - which basically says this is nothing but a warning and everything should be fine, but that doesn't explain why my cube is flashing between white and my color. The same code works fine on my laptop (a 2019 Asus laptop which also has an NVIDIA GTX graphics chip). Has anyone come across this issue before? Here is the relevant code:
const char* vertexShaderCode =
R"HereDoc(
#version 430
in layout(location=0) vec3 position;
in layout(location=1) vec3 color;
out vec3 fragColor;
uniform mat4 transformationMatrix;
void main()
{
vec4 newPos = vec4(position, 1.0) * transformationMatrix;//vector is on the left side because my matrices are row major
gl_Position = newPos;
vec3 changedColors;
changedColors.r += color.r + 0;
changedColors.g += color.g + 0;
changedColors.b += color.b + 0;
fragColor = changedColors;
};
)HereDoc";
const char* fragmentShaderCode =
R"HereDoc(
#version 430
out vec4 color;
in vec3 fragColor;
void main()
{
color = vec4(fragColor, 1.0f);
};
)HereDoc";
void GLAPIENTRY MyOpenGLErrorCallbackFunc(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, const void *userParam)
{
BGZ_CONSOLE("%s type=0x%x %s\n", ( type == GL_DEBUG_TYPE_ERROR ? "** GL ERROR **" : "" ), type, message);
};
void CheckCompileStatus(GLuint shaderID)
{
GLint compileStatus;
glGetShaderiv(shaderID, GL_COMPILE_STATUS, &compileStatus);
if(compileStatus != GL_TRUE)
{
GLint infoLogLength;
glGetShaderiv(shaderID, GL_INFO_LOG_LENGTH, &infoLogLength);
GLchar buffer[512] = {};
GLsizei bufferSize;
glGetShaderInfoLog(shaderID, infoLogLength, &bufferSize, buffer);
BGZ_CONSOLE("%s", buffer);
InvalidCodePath;
};
};
void CheckLinkStatus(GLuint programID)
{
GLint linkStatus;
glGetProgramiv(programID, GL_LINK_STATUS, &linkStatus);
if(linkStatus != GL_TRUE)
{
GLint infoLogLength;
glGetProgramiv(programID, GL_INFO_LOG_LENGTH, &infoLogLength);
GLchar buffer[512] = {};
GLsizei bufferSize;
glGetProgramInfoLog(programID, infoLogLength, &bufferSize, buffer);
BGZ_CONSOLE("%s", buffer);
InvalidCodePath;
};
};
local_func void InstallShaders()
{
GLuint vertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint fragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
const char* adapter[1];
adapter[0] = vertexShaderCode;
glShaderSource(vertexShaderID, 1, adapter, 0);
adapter[0] = fragmentShaderCode;
glShaderSource(fragmentShaderID, 1, adapter, 0);
glCompileShader(vertexShaderID);
glCompileShader(fragmentShaderID);
CheckCompileStatus(vertexShaderID);
CheckCompileStatus(fragmentShaderID);
GLuint programID = glCreateProgram();
glAttachShader(programID, vertexShaderID);
glAttachShader(programID, fragmentShaderID);
glLinkProgram(programID);
CheckLinkStatus(programID);
glUseProgram(programID);
};
local_func void
GLInit(int windowWidth, int windowHeight)
{
glEnable(GL_DEBUG_OUTPUT);
glDebugMessageCallback(MyOpenGLErrorCallbackFunc, 0);
glViewport(0, 0, windowWidth, windowHeight);
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glEnable(GL_DEPTH_TEST);
glEnable(GL_CULL_FACE);//Defaults to CCW ordering of indices, meaning all indices that, from the viewer's perspective, create triangles in a CW manner represent visible triangles.
glCullFace(GL_BACK);//Culls only back faces (faces facing away from viewer)
InstallShaders();
}
void Draw(Memory_Partition* memPart, s32 id, RunTimeArr<s16> meshIndicies)
{
glDisable(GL_TEXTURE_2D);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 2);
glDrawElements(GL_TRIANGLES, (s32)meshIndicies.length, GL_UNSIGNED_SHORT, 0);
glEnable(GL_TEXTURE_2D);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
};
//This implements a discriminated union for buffering render commands that my game code layer uses.
void RenderViaHardware(Rendering_Info&& renderingInfo, Memory_Partition* platformMemoryPart, int windowWidth, int windowHeight)
{
local_persist bool glIsInitialized { false };
if (NOT glIsInitialized)
{
GLInit(windowWidth, windowHeight);
glClearColor(0.0f, 0.0f, 1.0f, 0.0f);
glIsInitialized = true;
};
u8* currentRenderBufferEntry = renderingInfo.cmdBuffer.baseAddress;
Camera3D camera3d = renderingInfo.camera3d;
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_TEXTURE_2D);
for (s32 entryNumber = 0; entryNumber < renderingInfo.cmdBuffer.entryCount; ++entryNumber)
{
RenderEntry_Header* entryHeader = (RenderEntry_Header*)currentRenderBufferEntry;
switch (entryHeader->type)
{
case EntryType_InitBuffer:{
RenderEntry_InitBuffer bufferData = *(RenderEntry_InitBuffer*)currentRenderBufferEntry;
ScopedMemory scope{platformMemoryPart};
RunTimeArr<GLfloat> verts{};
InitArr(verts, platformMemoryPart, bufferData.verts.length * 6);
s32 i{};
f32 colorR{1.0f}, colorG{}, colorB{};//I'm just hard-coding color data right now while I'm learning
for(s32 j{}; j < bufferData.verts.length; ++j)
{
verts.Push(bufferData.verts[j].x);
verts.Push(bufferData.verts[j].y);
verts.Push(bufferData.verts[j].z);
verts.Push(colorR);
verts.Push(colorG);
verts.Push(colorB);
};
u32 vertexArrayID{};
glGenVertexArrays(1, &vertexArrayID);
glBindVertexArray(vertexArrayID);
GLuint bufferID;
glGenBuffers(1, &bufferID);
glBindBuffer(GL_ARRAY_BUFFER, bufferID);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * verts.length, verts.elements, GL_DYNAMIC_DRAW);
glEnableVertexAttribArray(0);
glEnableVertexAttribArray(1);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 6, 0);
glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, sizeof(GLfloat) * 6, (char*)(sizeof(GLfloat)*3));
GLuint indexBufferID;
glGenBuffers(1, &indexBufferID);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, indexBufferID);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(s16) * bufferData.indicies.length, bufferData.indicies.elements, GL_DYNAMIC_DRAW);
currentRenderBufferEntry += sizeof(RenderEntry_InitBuffer);
}break;
//...other cases for entries which are irrelevant to problem
case EntryType_Geometry: {
ScopedMemory scope{platformMemoryPart};
RenderEntry_Geometry geometryEntry = *(RenderEntry_Geometry*)currentRenderBufferEntry;
//camera transform setup
Mat4x4 xRotMatrix = XRotation(camera3d.rotation.x);
Mat4x4 yRotMatrix = YRotation(camera3d.rotation.y);
Mat4x4 zRotMatrix = ZRotation(camera3d.rotation.z);
Mat4x4 fullRotMatrix = xRotMatrix * yRotMatrix * zRotMatrix;
v3 xAxis = GetColumn(fullRotMatrix, 0);
v3 yAxis = GetColumn(fullRotMatrix, 1);
v3 zAxis = GetColumn(fullRotMatrix, 2);
//Setup full transform matrix
Mat4x4 camTransform = ProduceCameraTransformMatrix(xAxis, yAxis, zAxis, camera3d.worldPos);
Mat4x4 projectionTransform = ProduceProjectionTransformMatrix_UsingFOV(renderingInfo.fov, renderingInfo.aspectRatio, renderingInfo.nearPlane, renderingInfo.farPlane);
Mat4x4 fullTransformMatrix = projectionTransform * camTransform * geometryEntry.worldTransform;
//Send transform matrix to vertex shader
GLint transformMatrixUniformLocation = glGetUniformLocation(3, "transformationMatrix");
glUniformMatrix4fv(transformMatrixUniformLocation, 1, GL_FALSE, &fullTransformMatrix.elem[0][0]);
Draw(platformMemoryPart, geometryEntry.id, geometryEntry.indicies);
currentRenderBufferEntry += sizeof(RenderEntry_Geometry);
}break;
InvalidDefaultCase;
};
}
renderingInfo.cmdBuffer.entryCount = 0;
};
EDIT:
I figured out the issue with the colors not working, which was answered in the comments below. However, I still don't know what these warnings are trying to tell me and whether they are something I should be looking to fix.
Your variable vec3 changedColors; is uninitialized. Initialize it with vec3 changedColors = vec3(0);. The reason it works on your laptop might be that its graphics driver initializes the variable to zero by default, while your other graphics driver doesn't.
Regarding the warning (not error): it just tells you that your buffer will be put in video memory since you're using GL_STATIC_DRAW for it. It's really more of a log message and you can safely ignore it. If you want to get rid of it you have to filter it out in your callback (the one you passed to glDebugMessageCallback). Your callback receives a severity parameter that lets you filter out messages of a certain severity.
Or, if you only want to get rid of that specific message, filter on its id value.
Here's an example taken from blog.nobel-joergensen.com:
void APIENTRY openglCallbackFunction(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar* message, const void* userParam)
{
cout << "---------------------opengl-callback-start------------" << endl;
cout << "message: "<< message << endl;
cout << "type: ";
switch (type) {
case GL_DEBUG_TYPE_ERROR:
cout << "ERROR";
break;
case GL_DEBUG_TYPE_DEPRECATED_BEHAVIOR:
cout << "DEPRECATED_BEHAVIOR";
break;
case GL_DEBUG_TYPE_UNDEFINED_BEHAVIOR:
cout << "UNDEFINED_BEHAVIOR";
break;
case GL_DEBUG_TYPE_PORTABILITY:
cout << "PORTABILITY";
break;
case GL_DEBUG_TYPE_PERFORMANCE:
cout << "PERFORMANCE";
break;
case GL_DEBUG_TYPE_OTHER:
cout << "OTHER";
break;
}
cout << endl;
cout << "id: " << id << endl;
cout << "severity: ";
switch (severity){
case GL_DEBUG_SEVERITY_LOW:
cout << "LOW";
break;
case GL_DEBUG_SEVERITY_MEDIUM:
cout << "MEDIUM";
break;
case GL_DEBUG_SEVERITY_HIGH:
cout << "HIGH";
break;
}
cout << endl;
cout << "---------------------opengl-callback-end--------------" << endl;
}
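If you'd rather not receive that particular message at all, you can also filter it out at the source with glDebugMessageControl, called once after enabling debug output. A minimal sketch; the id 131185 is an assumption taken from the Khronos thread linked in the question, so verify it against the id your callback actually reports:
// Silence only the "will use VIDEO memory..." notification by its id.
GLuint silencedIds[] = { 131185 };             // assumed id - check what your callback prints
glDebugMessageControl(GL_DEBUG_SOURCE_API,     // source/type must not be GL_DONT_CARE when ids are given
                      GL_DEBUG_TYPE_OTHER,
                      GL_DONT_CARE,            // severity must be GL_DONT_CARE when ids are given
                      1, silencedIds,
                      GL_FALSE);               // GL_FALSE = stop delivering these messages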

OpenGL GLFW White Screen w/ Loading Cursor on run

ISSUE: When launched, the GL window displays only a white screen and the cursor shows a loading circle, signifying that something is still being loaded. The window displays "Not Responding" shortly after that.
I have tried downgrading to OpenGL 3.3, using glad to help with that, but the problem persists.
Hello all,
I've been working to create a sphere with alternating colors using a vertex shader.
The code that I've shared below was slightly altered from code used to shade a quad, which worked fine. I expect there will be issues with similar logic being used to shade a circle, or to build a sphere and shade that, but I am NOT at that point yet. Something is keeping my GL window from displaying properly, and I am hoping that someone can help me with my GLFW and GLEW logic and explain why the window is failing to load.
NOTE: I've edited this code to include comments for every step, which makes it seem much longer than it is. I would appreciate any help or insight.
PRIMARY CLASS
#include <iostream>
#include <sstream>
#define GLEW_STATIC
//always GLEW before GLFW
#include "GL/glew.h"
#include "GLFW/glfw3.h"
#include "glm/glm.hpp"
#include "ShaderProgram.h"
#ifndef M_PI
# define M_PI 3.141592653
#endif
/////gLOBAL
GLFWwindow* w = NULL;
const int wWidth = 800;
const int wHeight = 600;
void key_callback(GLFWwindow *w, int key, int scancode, int action, int mode);
//update colors based on average framerate
void averageFPS(GLFWwindow* window);
//screen resizing
void glfw_onFramebufferSize(GLFWwindow* window, int width, int height);
bool initOpenGL();
static void error(int error, const char *desc)
{
fputs(desc, stderr);
}
//setting up values for keys
int main() {
if (!initOpenGL()) ///5IMPR
{
// An error occured
std::cerr << "GLFW not initialized" << std::endl;
return -1;
}
glfwSetErrorCallback(error);
GLfloat vertices[] = {
-0.5f, 0.5f, 0.0f,
0.5f, 0.5f, 0.0f,
-0.5f, 1.0f, 0.0f
};
GLuint indices[] = {
0, 1, 2,
0, 2, 3
};
// 2. Set up buffers on the GPU
GLuint vbo, ibo, vao;
glGenBuffers(1, &vbo); // Generate an empty vertex buffer on the GPU
glBindBuffer(GL_ARRAY_BUFFER, vbo); // "bind" or set as the current buffer we are working with
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); // copy the data from CPU to GPU
glGenVertexArrays(1, &vao); // Tell OpenGL to create new Vertex Array Object
glBindVertexArray(vao); // Make it the current one
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL); // Define a layout for the first vertex buffer "0"
glEnableVertexAttribArray(0); // Enable the first attribute or attribute "0"
// Set up index buffer
glGenBuffers(1, &ibo); // Create buffer space on the GPU for the index buffer
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
glBindVertexArray(0); // unbind to make sure other code doesn't change it
ShaderProgram shaderProgram;
shaderProgram.assignShaders("shaders/ColorShader.vert", "shaders/ColorShader.frag");
////////SETUP RENDERING
while (!glfwWindowShouldClose(w))
{
averageFPS(w);
//process events
glfwPollEvents();
// Clear the screen
glClear(GL_COLOR_BUFFER_BIT);
shaderProgram.use();
GLfloat time = (GLfloat)glfwGetTime();
GLfloat blueSetting = (sin(time) / 2) + 0.5f;
glm::vec2 pos;
pos.x = sin(time) / 2;
pos.y = cos(time) / 2;
shaderProgram.setUniform("vertColor", glm::vec4(0.0f, 0.0f, blueSetting, 1.0f));
shaderProgram.setUniform("posOffset", pos);
/////COLOR OF CIRCLE OUTLINE
//glColor4f(0.0, 0.0, 1.0, 1.0); //RGBA
//PRIMARY BODY
// Draw our line
glBegin(GL_LINE_LOOP);
//glColor3f(0,0,1);
static double iteration = 0;
// The x, y offset onto the screen -- this should later be centered
static const int offset = 150;
static const float radius = 50;
// Calculate our x, y coordinates
double x1 = offset + radius + 100 * cos(1);
double y1 = offset + radius + 100 * sin(1);
static double wobble = 0.0;
// A = (π * r²)
double a = M_PI * (100 * 2); //area
// C = (2 * π * r)
double c = 2 * M_PI * 100; //circumference
static double b = 128;
for (double i = 0; i < 2 * M_PI; i = i + ((2 * M_PI) / b))
{
double x = x1 + radius * cos(i);
double y = y1 + radius * sin(i);
glVertex2f(x, y);
glVertex2f(x, y);
}
iteration += 0.01;
////PRIMARY BODY End
glBindVertexArray(vao);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
//glDrawElements(GL_LINE_LOOP, 6, GL_UNSIGNED_INT, 0);
glBindVertexArray(0);
// Swap buffers and look for events
glfwSwapBuffers(w);
}
//clean up
glDeleteVertexArrays(1, &vao);
glDeleteBuffers(1, &vbo);
glDeleteBuffers(1, &ibo);
//glfwDestroyWindow(w);
glfwTerminate();
return 0;
}
///////START Initializing glfw glew etc
bool initOpenGL(){
//this method will exit on these conditions
GLuint error = glfwInit();
if (!error)
return false;
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
w = glfwCreateWindow(wWidth, wHeight, "Exercise", NULL, NULL);
//Filling Window
if (w== NULL)
{
std::cerr << "glfw window not created" << std::endl;
glfwTerminate();
return false;
}
//update context
glfwMakeContextCurrent(w);
// Initialize GLEW
glewExperimental = GL_TRUE;
GLuint err = glewInit();
if (err != GLEW_OK)
{
std::cerr << "initialize GLEW Failed" << std::endl;
return false;
}
//setup key callbacks
glfwSetKeyCallback(w, key_callback);
glfwSetFramebufferSizeCallback(w, glfw_onFramebufferSize);
while (!glfwWindowShouldClose(w))
{
//int width, height;
// glfwGetFramebufferSize(w, &width, &height); //move out of while??
// glViewport(0, 0, width, height); //remove??
}
glClearColor(0.23f, 0.38f, 0.47f, 1.0f); ///5ADD
// Define the viewport dimensions
glViewport(0, 0, wWidth, wHeight); //necessary?
return true;
}
void key_callback(GLFWwindow *w, int key, int scancode, int action, int mode)
{
// See http://www.glfw.org/docs/latest/group__keys.html
if ((key == GLFW_KEY_ESCAPE || key == GLFW_KEY_Q) && action == GLFW_PRESS)
glfwSetWindowShouldClose(w, GL_TRUE);
if (key == GLFW_KEY_W && action == GLFW_PRESS)
{
bool showWires = false;
if (showWires)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
else
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
}
}
//whenever the window resizes, do this
void glfw_onFramebufferSize(GLFWwindow* window, int width, int height)
{
glViewport(0, 0, width, height);
}
void averageFPS(GLFWwindow* window) ///5ADDdown
{
static double previousSeconds = 0.0;
static int frameCount = 0;
double passedSeconds;
double currentSeconds = glfwGetTime(); //seconds since GLFW started
passedSeconds = currentSeconds - previousSeconds;
// Limit time updates to 4 times per second
if (passedSeconds > 0.25)
{
previousSeconds = currentSeconds;
double fps = (double)frameCount / passedSeconds;
// double frameInMilSecs = 1000.0 / fps;
frameCount = 0;}
frameCount++;
}
SHADER MANAGER/HANDLER CLASS
#include "ShaderProgram.h"
#include <fstream>
#include <iostream>
#include <sstream>
ShaderProgram::ShaderProgram()
: mProgram(0){
}
ShaderProgram::~ShaderProgram()
{
glDeleteProgram(mProgram);
}
bool ShaderProgram::assignShaders(const char* vertFileName, const char* fragFileName)
{
//Shaders output objects called programs that define their relationship and lead to .exe functionality
//assigning pointer to the shader
string vsString = readFile(vertFileName);
string fsString = readFile(fragFileName);
const GLchar* fsSourcePtr = fsString.c_str();
const GLchar* vsSourcePtr = vsString.c_str();
//creating vertex shader(vs) shader object
GLuint vs = glCreateShader(GL_VERTEX_SHADER);
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
//assigning shader source using address. Replaces the source code in a shader object //#arg (shader, count Strings, pointer to const File ,size)
glShaderSource(vs, 1, &vsSourcePtr, NULL);
glShaderSource(fs, 1, &fsSourcePtr, NULL);
glCompileShader(vs);
glCompileShader(fs);
testProgramCompile();
testShaderCompile(vs);
testShaderCompile(fs);
//createProgram returns GLUint which is basically an unsigned int... we will use This Handler to create a program object
mProgram = glCreateProgram();
if (mProgram == 0)
{
std::cerr << "Shader cannot be created" << std::endl;
return false;
}
//assign the program object(mProgram) to the Shader
glAttachShader(mProgram, vs);
glAttachShader(mProgram, fs);
//this method accepts a GLuint "program". If it's an object of type GL_VERTEX_SHADER,
//it'll create a .exe that runs on the programmable vertex processor. The same goes for geometry and fragment shaders if they were included
//it will also bind all user defined uniform variables and attributes to the program
//The program can then be made part of a defined state by calling useProgram
glLinkProgram(mProgram);
testProgramCompile();
testShaderCompile(vs);
testShaderCompile(vs);
//cleaning up the elements we already used
glDeleteShader(vs);
glDeleteShader(fs);
//clear the identifier lookup map(in this case, there's only one)
mUniformIdentifiers.clear();
return true;
}//end main
//Read the shaderFile. strngstream for reading multiple lines
string ShaderProgram:: readFile(const string& filename) {
std::stringstream strgstream;
std::ifstream file;
try
{
file.open(filename, std::ios::in);
if (!file.fail())
{
strgstream << file.rdbuf();
}
file.close();
}
catch (std::exception e)
{
std::cerr << "Error: File or File Name Issues" << std::endl;
}
return strgstream.str();
}
//use the Program Object we created in this current state(color)
void ShaderProgram::use()
{
if (mProgram != 0)
glUseProgram(mProgram);
}
void ShaderProgram::testProgramCompile() {
int status = 0;
GLuint program = mProgram;
// ///CHECKING GL_LINK_STATUS to see if the program link was successful. Link status will return GL_TRUE if it was
glGetProgramiv( mProgram, GL_LINK_STATUS, &status); //requesting the status
if (status == GL_FALSE)
{
std::cerr << "Linking Error with Program " << std::endl;
}
}
void ShaderProgram :: testShaderCompile(GLuint shader) {
int status = 0;
// ///CHECKING GL_LINK_STATUS to see if the program link was successful. Link status will return GL_TRUE if it was
glGetProgramiv(shader, GL_LINK_STATUS, &status); //requesting the status
if (status == GL_FALSE)
{
std::cerr << "Linking Error with Shader " << std::endl;
}
}
////GETTERS AND SETTERS
GLuint ShaderProgram::getProgram() const
{
return mProgram;
}
void ShaderProgram::setUniform(const GLchar* name, const glm::vec2& v)
{
GLint address = getUniformIdentifier(name);
glUniform2f(address, v.x, v.y);
}
void ShaderProgram::setUniform(const GLchar* name, const glm::vec3& v)
{
GLint address = getUniformIdentifier(name);
glUniform3f(address, v.x, v.y, v.z);
}
void ShaderProgram:: setUniform(const GLchar* name, const glm::vec4& v) {
GLint address = getUniformIdentifier(name);
glUniform4f(address, v.x, v.y, v.z, v.w);
}
//Maybe need to switch places with setUniform
GLint ShaderProgram :: getUniformIdentifier(const GLchar* name) {
std::map<std::string, GLint>::iterator it;
it = mUniformIdentifiers.find(name);
//std::map<std::string, GLint>
// Only need to query the shader program IF it doesn't already exist.
if (it == mUniformIdentifiers.end())
{
// Find it and add it to the map
mUniformIdentifiers[name] = glGetUniformLocation(mProgram, name);
}
// Return it
return mUniformIdentifiers[name];
}
You have this in your init function.
while (!glfwWindowShouldClose(w))
{
//int width, height;
// glfwGetFramebufferSize(w, &width, &height); //move out of while??
// glViewport(0, 0, width, height); //remove??
}
Your code is presumably hanging here.
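A minimal fix, assuming that loop is just a leftover from experimenting, is to delete it so initOpenGL can return and the real render loop in main can run; resizing is already handled by your glfw_onFramebufferSize callback:
// end of initOpenGL(), with the stray while loop removed:
glfwSetKeyCallback(w, key_callback);
glfwSetFramebufferSizeCallback(w, glfw_onFramebufferSize);
glClearColor(0.23f, 0.38f, 0.47f, 1.0f);
glViewport(0, 0, wWidth, wHeight);
return true;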

OpenGL drawing Triangles only white

I am trying to learn OpenGL with these two tutorials:
https://learnopengl.com/#!Getting-started/Hello-Triangle and
https://www.youtube.com/playlist?list=PLEETnX-uPtBXT9T-hD0Bj31DSnwio-ywh
When I draw a simple triangle it is only white. But the code seems right.
This is the fragment Shader:
#version 330 core
out vec4 fragColor;
void main()
{
fragColor = vec4(0.5, 0.3, 0.1, 1.0);
}
This is the ShaderProgram:
ShaderProgram::ShaderProgram(std::string fileName)
{
vertexShader = glCreateShader(GL_VERTEX_SHADER);
if (vertexShader == 0)
std::cerr << "VertexShader creation failed! " << std::endl;
const char* vertexShaderSource = (SourceLoader(fileName + ".vs")).c_str();
glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
glCompileShader(vertexShader);
CheckErrorMessages(vertexShader, GL_COMPILE_STATUS, false, "VertexShader Compilation failed! ");
fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
if (fragmentShader == 0)
std::cerr << "FragmentShader Creation failed! " << std::endl;
const char* fragmentShaderSource = (SourceLoader(fileName + ".fs")).c_str();
glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
glCompileShader(fragmentShader);
CheckErrorMessages(fragmentShader, GL_COMPILE_STATUS, false, "FragmentShader Compilation failed! ");
program = glCreateProgram();
glAttachShader(program, vertexShader);
glAttachShader(program, fragmentShader);
glLinkProgram(program);
CheckErrorMessages(program, GL_LINK_STATUS, true, "Program linking failed! ");
glValidateProgram(program);
CheckErrorMessages(program, GL_VALIDATE_STATUS, true, "Program validation failed! ");
}
I have three methods in the ShaderProgram class:
1. a method to load the shader code, which is definitely working.
2. a method to check for error messages, which is also working.
3. and a bind() function which just calls glUseProgram(program)
I also have a class for the window, which is created by SDL
Display::Display(std::string title, unsigned int width, unsigned int height)
{
SDL_GL_SetAttribute(SDL_GL_RED_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_GREEN_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BLUE_SIZE, 8);
SDL_GL_SetAttribute(SDL_GL_BUFFER_SIZE, 32);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
m_window = SDL_CreateWindow(title.c_str(), SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED, width, height, SDL_WINDOW_OPENGL);
m_glcontext = SDL_GL_CreateContext(m_window);
GLenum status = glewInit();
if (status != GLEW_OK)
std::cerr << "GLEW failed to initialize!" << std::endl;
isClosed = false;
}
the Display class has a method to update and clear:
void Display::Update()
{
SDL_GL_SwapWindow(m_window);
SDL_Event e;
while (SDL_PollEvent(&e))
{
if (e.type == SDL_QUIT)
isClosed = true;
}
}
void Display::Clear(float red, float green, float blue, float alpha)
{
glClearColor(red, green, blue, alpha);
glClear(GL_COLOR_BUFFER_BIT);
}
I also have a class called Mesh to manage VAO and VBO etc.:
Mesh::Mesh(Vertex* vertices, unsigned int numVertices)
{
drawCount = numVertices;
glGenVertexArrays(1, &VAO);
glBindVertexArray(VAO);
GLuint VBO;
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, numVertices * sizeof(vertices[0]), vertices, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(vertices[0]), (void*)0);
glEnableVertexAttribArray(0);
}
Mesh has one function to draw the given vertices:
void Mesh::Draw()
{
glBindVertexArray(VAO);
glDrawArrays(GL_TRIANGLES, 0, drawCount);
glBindVertexArray(0);
}
The main function consists of a while loop:
int main(int argc, char* argv[])
{
SDL_Init(SDL_INIT_EVERYTHING);
Display display("Fenster", 1024, 840);
ShaderProgram shader("./res/Shader");
Vertex vertices[] = { Vertex(glm::vec3(1, 1, 0)), Vertex(glm::vec3(1, -1, 0)), Vertex(glm::vec3(-1, -1, 0)) };
Mesh mesh(vertices, 3);
while (!display.getIsClosed())
{
display.Clear(1.0f, 0.0f, 1.0f, 1.0f);
shader.Bind();
mesh.Draw();
display.Update();
}
SDL_Quit();
return 0;
}
The problem is that I don't get any errors, but the triangle stays white.
Thank you for helping!
Edit: Vertex Shader is here:
#version 330 core
layout (location = 0) in vec3 position;
void main()
{
gl_Position = vec4(position.x, position.y, position.z, 1.0);
}
If you checked your info log you'd find that your shaders aren't compiling. You're taking a pointer into a temporary string that gets destroyed at the end of the statement, so you're sending gibberish to the shader compiler:
const char* fragmentShaderSource = (SourceLoader(fileName + ".fs")).c_str();// After this line the pointer isn't valid
glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
SourceLoader returns a temporary string, and you store a pointer to its internal buffer in fragmentShaderSource. By the time you pass that pointer to glShaderSource, the string is gone and the pointer refers to junk. You can do:
std::string vertShaderString = SourceLoader(fileName + ".vs");
const char* vertShaderSource = vertShaderString.c_str();
glShaderSource(vertexShader, 1, &vertShaderSource, NULL);
You also need to fix the same problem for the fragment shader.
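This just mirrors the fix above with your fragment-shader names:
std::string fragShaderString = SourceLoader(fileName + ".fs");
const char* fragShaderSource = fragShaderString.c_str();
glShaderSource(fragmentShader, 1, &fragShaderSource, NULL);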
Also you drew a clockwise winding triangle, which is fine, but by default OpenGL considers counterclockwise winding as front-facing.
Another thing: the reason you didn't catch the error is that your CheckErrorMessages doesn't actually report anything; you should be retrieving the info log and printing it.
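For example, a minimal compile check could look like this (names are illustrative; assumes <vector> and <iostream> are included):
void CheckCompileErrors(GLuint shader, const char* label)
{
    GLint status = GL_FALSE;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &status);
    if (status != GL_TRUE)
    {
        // Query the log length, fetch the log, and print it.
        GLint logLength = 0;
        glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &logLength);
        std::vector<GLchar> log(logLength > 1 ? logLength : 1);
        glGetShaderInfoLog(shader, (GLsizei)log.size(), nullptr, log.data());
        std::cerr << label << " compilation failed:\n" << log.data() << std::endl;
    }
}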

GtkGLArea clears background but does not draw

I have been writing a simple GTK+ application and am just getting started with graphical development. I understand that this may not be a good place to start, jumping straight into 3D rendering, but I've done a small amount of it before with great success, and after using Glade and reading a plethora of docs I figured it would not be hard to integrate the two - I figured incorrectly. The problem at hand is that glDrawArrays appears not to be working. I looked at this question and unfortunately it did not help me. I followed some of this tutorial on OpenGL and also this tutorial on GtkGLArea, again to no avail.
Can anyone point me in the right direction on this one? I'm not sure where to go from here.
The relevant code is below:
#include "RenderingManager.hpp"
RenderingManager::RenderingManager() {
///GTK+ Setup///
std::cout << "starting render constructor" << std::endl;
glArea = GTK_GL_AREA(gtk_gl_area_new());
std::cout << "got new glarea" << std::endl;
g_signal_connect(GTK_WIDGET(glArea), "render", G_CALLBACK(signal_render), this);
g_signal_connect(GTK_WIDGET(glArea), "realize", G_CALLBACK(signal_realize), this);
g_signal_connect(GTK_WIDGET(glArea), "unrealize", G_CALLBACK(signal_unrealize), this);
gtk_widget_show(GTK_WIDGET(glArea));
///Get Shaders///
// vshader.open("vertex.shader");
// fshader.open("fragment.shader");
std::cout << "finished render constructor" << std::endl;
}
void RenderingManager::onRender() {
// Dark blue background
glClearColor(0.1f, 0.0f, 0.1f, 0.0f);
draw_triangle();
glFlush();
}
void RenderingManager::initBuffers () {
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
}
void RenderingManager::loadShaders() {
// Read the Vertex Shader code from the file
std::ifstream VertexShaderStream("vertex.shader", std::ios::in);
if(VertexShaderStream.is_open()){
std::string Line = "";
while(getline(VertexShaderStream, Line))
vshader += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::ifstream FragmentShaderStream("fragment.shader", std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(getline(FragmentShaderStream, Line))
fshader += "\n" + Line;
FragmentShaderStream.close();
}
GLuint vsh, fsh;
vsh = glCreateShader(GL_VERTEX_SHADER);
fsh = glCreateShader(GL_FRAGMENT_SHADER);
vshp = vshader.data();
fshp = fshader.data();
// vshp = vshader.get().c_str();
// fshp = fshader.get().c_str();
// vshader.get(vshp);
// fshader.get(fshp);
printf("%s\n%s\n", vshp, fshp);
glShaderSource(vsh, 1, &vshp, NULL);
glShaderSource(fsh, 1, &fshp, NULL);
glCompileShader(vsh);
glCompileShader(fsh);
shaderProgramID = glCreateProgram();
glAttachShader(shaderProgramID, vsh);
glAttachShader(shaderProgramID, fsh);
glLinkProgram(shaderProgramID);
GLint Result = GL_FALSE;
int InfoLogLength;
// Check Vertex Shader
glGetShaderiv(vsh, GL_COMPILE_STATUS, &Result);
glGetShaderiv(vsh, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
char* VertexShaderErrorMessage = new char[InfoLogLength+1];
glGetShaderInfoLog(vsh, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
// Check Fragment Shader
glGetShaderiv(fsh, GL_COMPILE_STATUS, &Result);
glGetShaderiv(fsh, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
char* FragmentShaderErrorMessage = new char[InfoLogLength+1];
glGetShaderInfoLog(fsh, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
}
void RenderingManager::onActivate() {
// We need to make the context current if we want to
// call GL API
gtk_gl_area_make_current (glArea);
glewExperimental = GL_TRUE;
glewInit();
loadShaders();
initBuffers();
}
void RenderingManager::signal_render(GtkGLArea *a, gpointer *user_data) {
reinterpret_cast<RenderingManager*>(user_data)->onRender();
}
void RenderingManager::signal_realize(GtkGLArea *a, gpointer *user_data) {
reinterpret_cast<RenderingManager*>(user_data)->onActivate();
}
void RenderingManager::signal_unrealize(GtkGLArea *a, gpointer *user_data) {
//Don't do this
//reinterpret_cast<RenderingManager*>(user_data)->~RenderingManager();
}
void RenderingManager::draw_triangle() {
// Clear the screen
glClear( GL_COLOR_BUFFER_BIT );
// Use our shader
glUseProgram(shaderProgramID);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // 3 indices starting at 0 -> 1 triangle
glDisableVertexAttribArray(0);
}
GtkGLArea *RenderingManager::expose() {
//yikes
return glArea;
}
RenderingManager::~RenderingManager() {
glDeleteBuffers(1, &vbo);
glDeleteVertexArrays(1, &vao);
glDeleteProgram(shaderProgramID);
std::cout << "GL Resources deleted." << std::endl;
}
Due to the asynchronous nature of X11 (which GTK+ uses), the GL context can't be created before the window is realized (i.e. before a connection to X11 has been made).
Create the GL context in your signal_realize() handler and make it current before drawing; the drawing itself should be done when handling the expose_event signal (GTK+ 2) or the draw signal (GTK+ 3).
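With GtkGLArea specifically, that means doing all GL setup in the realize handler after making the area's context current, and checking that the context was actually created. A minimal sketch based on your onActivate, with the error check added (gtk_gl_area_get_error is part of the GtkGLArea API):
void RenderingManager::onActivate() {
    gtk_gl_area_make_current(glArea);
    // If context creation failed, nothing below is safe to call.
    if (gtk_gl_area_get_error(glArea) != NULL) {
        std::cerr << "GL context creation failed" << std::endl;
        return;
    }
    glewExperimental = GL_TRUE;
    glewInit();
    loadShaders();
    initBuffers();
}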

OpenGL program won't execute properly if an explicit version is set

My computer runs Ubuntu 16.04 and is equipped with an NVIDIA GeForce GT 630M graphics card with the proprietary driver installed. The glGetString(GL_VERSION) function shows that, by default, my graphics card supports OpenGL 4.5.
I have been following the Learn OpenGL tutorial series and I have the following difficulty: I can only get the tutorial's "Hello Triangle" program to run properly if I comment out the lines
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
Leaving those lines as-is will prevent the triangle from appearing.
I am having trouble understanding why setting a required OpenGL version lower than the OpenGL version my card can support would make the program fail.
EDIT: the commands
std::cout << "Renderer: " << glGetString(GL_RENDERER) << std::endl;
std::cout << "Version: " << glGetString(GL_VERSION) << std::endl;
std::cout << "Shading Language: " << glGetString(GL_SHADING_LANGUAGE_VERSION) << std::endl;
output
Renderer: GeForce GT 630M/PCIe/SSE2
Version: 4.5.0 NVIDIA 361.42
Shading Language: 4.50 NVIDIA
if those lines are commented out, and
Renderer: GeForce GT 630M/PCIe/SSE2
Version: 3.3.0 NVIDIA 361.42
Shading Language: 3.30 NVIDIA via Cg compiler
if those lines are left in place.
EDIT2: Here's the actual source code:
#include <array>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
constexpr char FRAGMENT_SHADER_SOURCE_FILE[] = "simple_fragment.shader";
constexpr char VERTEX_SHADER_SOURCE_FILE[] = "simple_vertex.shader";
constexpr int WINDOW_WIDTH = 800;
constexpr int WINDOW_HEIGHT = 800;
constexpr char WINDOW_TITLE[] = "Triangle";
constexpr std::array<GLfloat, 4> bgColour { 0.3f, 0.1f, 0.3f, 1.0f };
/*
* Instructs GLFW to close window if escape key is pressed.
*/
void keyCallback(GLFWwindow *window, int key, int scancode, int action, int mode);
int main() {
// Start GLFW.
if (not glfwInit()) {
std::cerr << "ERROR: Failed to start GLFW.\n";
return 1;
}
// Set OpenGL version.
//glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
//glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// Create window and bind to current contex.
GLFWwindow *window = glfwCreateWindow(WINDOW_WIDTH, WINDOW_HEIGHT, WINDOW_TITLE, nullptr,
nullptr);
if (not window) {
std::cerr << "ERROR: Failed to create GLFW window.\n";
glfwTerminate();
return 1;
}
glfwMakeContextCurrent(window);
// Set keyboard callback functions.
glfwSetKeyCallback(window, keyCallback);
// Initialize GLEW with experimental features turned on.
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK) {
std::cerr << "ERROR: Failed to start GLEW.\n";
glfwTerminate();
return 1;
}
// Create viewport coordinate system.
int width, height;
glfwGetFramebufferSize(window, &width, &height);
glViewport(0, 0, static_cast<GLsizei>(width), static_cast<GLsizei>(height));
// Create a vertex shader object.
GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
// Load the vertex shader source code.
std::string vertexShaderSource;
std::ifstream vsfs(VERTEX_SHADER_SOURCE_FILE);
if (vsfs.is_open()) {
std::stringstream ss;
ss << vsfs.rdbuf();
vertexShaderSource = ss.str();
}
else {
std::cerr << "ERROR: File " << VERTEX_SHADER_SOURCE_FILE << " could not be found.\n";
glfwTerminate();
return 1;
}
// Attach the shader source code to the vertex shader object and compile.
const char *vertexShaderSource_cstr = vertexShaderSource.c_str();
glShaderSource(vertexShader, 1, &vertexShaderSource_cstr, nullptr);
glCompileShader(vertexShader);
// Check if compilation was successful.
GLint success;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
if (not success) {
std::cerr << "ERROR: Vertex shader compilation failed.\n";
glfwTerminate();
return 1;
}
// Create a fragment shader object.
GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
// Load the fragment shader source code.
std::string fragmentShaderSource;
std::ifstream fsfs(FRAGMENT_SHADER_SOURCE_FILE);
if (fsfs.is_open()) {
std::stringstream ss;
ss << fsfs.rdbuf();
fragmentShaderSource = ss.str();
}
else {
std::cerr << "ERROR: File " << FRAGMENT_SHADER_SOURCE_FILE << " could not be found.\n";
glfwTerminate();
return 1;
}
// Attach the shader source code to the fragment shader object and compile.
const char *fragmentShaderSource_cstr = fragmentShaderSource.c_str();
glShaderSource(fragmentShader, 1, &fragmentShaderSource_cstr, nullptr);
glCompileShader(fragmentShader);
// Check if compilation was successful.
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
if (not success) {
std::cerr << "ERROR: Fragment shader compilation failed.\n";
glfwTerminate();
return 1;
}
// Create a shader program by linking the vertex and fragment shaders.
GLuint shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
// Check that shader program was successfully linked.
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success);
if (not success) {
std::cerr << "ERROR: Shader program linking failed.\n";
glfwTerminate();
return 1;
}
// Delete shader objects.
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
// Coordinates of triangle vertices in Normalized Device Coordinates (NDC).
std::array<GLfloat, 9> vertices {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
// Create a vertex array object.
GLuint vao;
glGenBuffers(1, &vao);
glBindVertexArray(vao);
// Create a vertex buffer object.
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// Pass vertex data into currently bound vertex buffer object.
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices.data(), GL_STATIC_DRAW);
// Create vertex attribute.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), static_cast<GLvoid*>(0));
glEnableVertexAttribArray(0);
// Unbind the vertex array object and vertex buffer object.
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glClearColor(bgColour[0], bgColour[1], bgColour[2], bgColour[3]);
while (not glfwWindowShouldClose(window)) {
glClear(GL_COLOR_BUFFER_BIT);
glfwPollEvents();
// Inform OpenGL to use the shader program created above.
glUseProgram(shaderProgram);
// Bind the vertex array object.
glBindVertexArray(vao);
// Draw the triangle.
glDrawArrays(GL_TRIANGLES, 0, 3);
// Unbind the vertex array object.
glBindVertexArray(0);
glfwSwapBuffers(window);
}
// Delete vertex array object.
glDeleteVertexArrays(1, &vao);
// Delete vertex buffer object.
glDeleteBuffers(1, &vbo);
// Delete shader program.
glDeleteProgram(shaderProgram);
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
void keyCallback(GLFWwindow *window, int key, int scancode, int action, int mode) {
if (key == GLFW_KEY_ESCAPE and action == GLFW_PRESS) {
glfwSetWindowShouldClose(window, GL_TRUE);
}
}
Here are the contents of simple_vertex.shader and simple_fragment.shader:
#version 330 core
layout (location = 0) in vec3 position;
void main() {
gl_Position = vec4(position.x, position.y, position.z, 1.0);
}
and
#version 330 core
out vec4 color;
void main() {
color = vec4(1.0f, 0.5f, 0.2f, 1.0f);
}
I made a typo in my code.
I used the function glGenBuffers instead of glGenVertexArrays to create my vertex array object. Apparently the NVIDIA driver accepts this unless I specify an OpenGL version. I still find it puzzling, but at least the problem is fixed.
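For reference, the corrected lines are simply:
// Create a vertex array object.
GLuint vao;
glGenVertexArrays(1, &vao);   // was: glGenBuffers(1, &vao); - in a core profile, glBindVertexArray
glBindVertexArray(vao);       // only accepts names from glGenVertexArrays, which is presumably why the version hints mattered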