I have been writing a simple GTK+ application and am just getting started with graphical development. I understand that jumping straight into 3D rendering may not be the best place to start, but I've done a small amount of it before with great success. Using Glade and reading a plethora of docs, I figured it would not be hard to integrate the two - I figured incorrectly. The problem at hand is that glDrawArrays appears not to be working. I looked at this question and, unfortunately, it did not help me. I followed some of this tutorial on OpenGL and also this tutorial on GtkGLArea, again to no avail.
Can anyone point me in the right direction on this one? I'm not sure where to go from here.
The relevant code is below:
#include "RenderingManager.hpp"
RenderingManager::RenderingManager() {
///GTK+ Setup///
std::cout << "starting render constructor" << std::endl;
glArea = GTK_GL_AREA(gtk_gl_area_new());
std::cout << "got new glarea" << std::endl;
g_signal_connect(GTK_WIDGET(glArea), "render", G_CALLBACK(signal_render), this);
g_signal_connect(GTK_WIDGET(glArea), "realize", G_CALLBACK(signal_realize), this);
g_signal_connect(GTK_WIDGET(glArea), "unrealize", G_CALLBACK(signal_unrealize), this);
gtk_widget_show(GTK_WIDGET(glArea));
///Get Shaders///
// vshader.open("vertex.shader");
// fshader.open("fragment.shader");
std::cout << "finished render constructor" << std::endl;
}
void RenderingManager::onRender() {
// Dark blue background
glClearColor(0.1f, 0.0f, 0.1f, 0.0f);
draw_triangle();
glFlush();
}
void RenderingManager::initBuffers () {
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
}
void RenderingManager::loadShaders() {
// Read the Vertex Shader code from the file
std::ifstream VertexShaderStream("vertex.shader", std::ios::in);
if(VertexShaderStream.is_open()){
std::string Line = "";
while(getline(VertexShaderStream, Line))
vshader += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::ifstream FragmentShaderStream("fragment.shader", std::ios::in);
if(FragmentShaderStream.is_open()){
std::string Line = "";
while(getline(FragmentShaderStream, Line))
fshader += "\n" + Line;
FragmentShaderStream.close();
}
GLuint vsh, fsh;
vsh = glCreateShader(GL_VERTEX_SHADER);
fsh = glCreateShader(GL_FRAGMENT_SHADER);
vshp = vshader.data();
fshp = fshader.data();
// vshp = vshader.get().c_str();
// fshp = fshader.get().c_str();
// vshader.get(vshp);
// fshader.get(fshp);
printf("%s\n%s\n", vshp, fshp);
glShaderSource(vsh, 1, &vshp, NULL);
glShaderSource(fsh, 1, &fshp, NULL);
glCompileShader(vsh);
glCompileShader(fsh);
shaderProgramID = glCreateProgram();
glAttachShader(shaderProgramID, vsh);
glAttachShader(shaderProgramID, fsh);
glLinkProgram(shaderProgramID);
GLint Result = GL_FALSE;
int InfoLogLength;
// Check Vertex Shader
glGetShaderiv(vsh, GL_COMPILE_STATUS, &Result);
glGetShaderiv(vsh, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
char* VertexShaderErrorMessage = new char[InfoLogLength+1];
glGetShaderInfoLog(vsh, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
// Check Fragment Shader
glGetShaderiv(fsh, GL_COMPILE_STATUS, &Result);
glGetShaderiv(fsh, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
char* FragmentShaderErrorMessage = new char[InfoLogLength+1];
glGetShaderInfoLog(fsh, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
}
void RenderingManager::onActivate() {
// We need to make the context current if we want to
// call GL API
gtk_gl_area_make_current (glArea);
glewExperimental = GL_TRUE;
glewInit();
loadShaders();
initBuffers();
}
void RenderingManager::signal_render(GtkGLArea *a, gpointer *user_data) {
reinterpret_cast<RenderingManager*>(user_data)->onRender();
}
void RenderingManager::signal_realize(GtkGLArea *a, gpointer *user_data) {
reinterpret_cast<RenderingManager*>(user_data)->onActivate();
}
void RenderingManager::signal_unrealize(GtkGLArea *a, gpointer *user_data) {
//Don't do this
//reinterpret_cast<RenderingManager*>(user_data)->~RenderingManager();
}
void RenderingManager::draw_triangle() {
// Clear the screen
glClear( GL_COLOR_BUFFER_BIT );
// Use our shader
glUseProgram(shaderProgramID);
// 1rst attribute buffer : vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // 3 indices starting at 0 -> 1 triangle
glDisableVertexAttribArray(0);
}
GtkGLArea *RenderingManager::expose() {
//yikes
return glArea;
}
RenderingManager::~RenderingManager() {
glDeleteBuffers(1, &vbo);
glDeleteVertexArrays(1, &vao);
glDeleteProgram(shaderProgramID);
std::cout << "GL Resources deleted." << std::endl;
}
Due to the asynchronous nature of X11 (which GTK+ uses), the GL context can't be created before the window is realized, i.e. before a connection to X11 is made.
Create the GL context in your signal_realize() handler and make it current before drawing, which should be done while handling the expose_event signal (GTK+ 2) or the draw signal (GTK+ 3).
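For reference, here is a minimal sketch of that split with GtkGLArea: realize does make-current and one-time setup, render only issues draw calls. The handler signatures follow the GTK+ 3 documentation; everything inside them is an assumption about your setup.
static void on_realize(GtkGLArea *area, gpointer user_data)
{
    gtk_gl_area_make_current(area);      // the context only exists once the widget is realized
    if (gtk_gl_area_get_error(area) != NULL)
        return;                          // context creation failed, nothing to set up
    // one-time setup goes here: glewInit(), shader compilation, buffer creation ...
}
static gboolean on_render(GtkGLArea *area, GdkGLContext *context, gpointer user_data)
{
    glClearColor(0.1f, 0.0f, 0.1f, 1.0f);
    glClear(GL_COLOR_BUFFER_BIT);
    // draw calls go here; GtkGLArea presents the result itself
    return TRUE;                         // stop further handlers
}
Note that the render handler receives a GdkGLContext* as its second argument, so a pointer you pass as user_data arrives in the third parameter, not the second.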
Related
I was trying to draw with coordinates of integer type, but failed. I then tried to draw a simple triangle as if from a tutorial. The result was that half of the screen was the triangle's colour and the other half was the glClearColor I had set. I started changing values, and the corners only started to show up when coordinates were smaller than 0.00000000000000000005f (which I suspect is beyond the precision of a float), even though they should already have appeared with anything smaller than 1.0f. Searching didn't turn up any results, perhaps because I don't know the right words.
Here's the code I ended up with:
Includes and a helper function I quickly moved here:
#include <GL/glew.h>
#include <glfw3.h>
#include <unistd.h>
#include <stdio.h>
/* function that creates shader */
GLuint makeShader(const GLchar* vertexShaderSource, const GLchar* fragmentShaderSource,
GLchar*& infoLog, const GLchar* geometryShaderSource = NULL)
{
GLint isSuccess;
infoLog = nullptr;
GLuint vertexShaderID = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShaderID, 1, &vertexShaderSource, nullptr);
glCompileShader(vertexShaderID);
glGetShaderiv(vertexShaderID, GL_COMPILE_STATUS, &isSuccess);
if(!isSuccess)
{
infoLog = new GLchar[1024];
glGetShaderInfoLog(vertexShaderID, 1024, nullptr, infoLog);
printf("Error compiling vertex shader: %s\n", infoLog);
printf("\n%s\n\n", vertexShaderSource);
return 0;
}
GLuint geometryShaderID;
if(geometryShaderSource!=NULL){
geometryShaderID = glCreateShader(GL_GEOMETRY_SHADER);
glShaderSource(geometryShaderID, 1, &geometryShaderSource, nullptr);
glCompileShader(geometryShaderID);
glGetShaderiv(geometryShaderID, GL_COMPILE_STATUS, &isSuccess);
if(!isSuccess)
{
infoLog = new GLchar[1024];
glGetShaderInfoLog(geometryShaderID, 1024, nullptr, infoLog);
printf("Error compiling geometry shader: %s\n", infoLog);
printf("\n%s\n\n", geometryShaderSource);
return 0;
}
}
GLuint fragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShaderID, 1, &fragmentShaderSource, nullptr);
glCompileShader(fragmentShaderID);
glGetShaderiv(fragmentShaderID, GL_COMPILE_STATUS, &isSuccess);
if(!isSuccess)
{
infoLog = new GLchar[1024];
glGetShaderInfoLog(fragmentShaderID, 1024, nullptr, infoLog);
printf("Error compiling fragment shader: %s\n", infoLog);
printf("\n%s\n\n", fragmentShaderSource);
return 0;
}
GLuint shaderID = glCreateProgram();
glAttachShader(shaderID, vertexShaderID);
glAttachShader(shaderID, fragmentShaderID);
if(geometryShaderSource!=NULL){
glAttachShader(shaderID, geometryShaderID);
}
glLinkProgram (shaderID);
glValidateProgram(shaderID);
glDeleteShader(vertexShaderID);
glDeleteShader(fragmentShaderID);
if(geometryShaderSource!=NULL){
glDeleteShader(geometryShaderID);
}
glGetProgramiv(shaderID, GL_LINK_STATUS, &isSuccess);
if (!isSuccess)
{
infoLog = new GLchar[1024];
glGetProgramInfoLog(shaderID, 1024, nullptr, infoLog); // Generate error as infoLog
//std::cout << "Error " << infoLog << std::endl; // Display
printf("Error linking shader %s\n", infoLog);
return 0;
}
return shaderID;
}
Main:
int main(void)
{
GLFWwindow* window;
float FPSlimit = 60;
int windowSX = 800, windowSY = 600;
/* Initialize the library */
if (!glfwInit()){
return -1;}
/* Create window */
window = glfwCreateWindow(windowSX, windowSY, "abc", NULL, NULL);
if (!window)
{
printf("Failed to create window\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
glfwSwapInterval(0);
/* Initialize GLEW */
if(glewInit()){
printf("Failed to initialize GLEW\n");
glfwTerminate();
return -1;
}
glClearColor(0.1, 0.3f, 0.2f, 1.0f);
double timeDeltaFPSmaintain = 0;
GLuint vao;
GLuint vbo;
glGenVertexArrays(1, &vao);
glGenBuffers(1, &vbo);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
// changing normalisation to GL_TRUE didn't do anything
float dat[] = {-0.000000000000000000024f, -0.000000000000000000024f, 0.f,
1.f, -1.f, 0.f,
0.f, 0.000000000000000000054f, 0.f};
glBufferData(GL_ARRAY_BUFFER, 3*3*sizeof(float), dat, GL_STATIC_DRAW);
GLchar* log;
const GLchar* v = "#version 330 \n layout (location = 0) in vec3 pos;\n void main(){gl_Position = vec4(pos.xyz, 0);}";
const GLchar* f = "#version 330 \n out vec4 color; void main(){color = vec4(1, 0, 0, 0);}";
GLuint sha = makeShader(v, f, log);
printf("%i\n", glGetError());//Shows 0
/* Main loop: */
while (!glfwWindowShouldClose(window))
{
glClear(GL_COLOR_BUFFER_BIT);
glUseProgram(sha);
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
glfwSwapBuffers(window);
glfwPollEvents();
// nvidia starts to use 100% of one of CPU cores with vSync for some reason
timeDeltaFPSmaintain = (glfwGetTime() - timeDeltaFPSmaintain)*1000000;
if(FPSlimit>1){
usleep(1000000/FPSlimit-unsigned(timeDeltaFPSmaintain));}
timeDeltaFPSmaintain = glfwGetTime();
}
glfwTerminate();
return 0;
}
The compiler also warns me about a corrupt .drectve at the end of the def file, but I doubt it matters much.
The transformation in your vertex shader is just broken:
gl_Position = vec4(pos.xyz, 0);
Setting w_clip = 0 means that, conceptually, your points are infinitely far away in the direction of (x, y, z), and the GPU will clip your primitive to its intersection with the viewing volume (which is still finite).
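The usual fix for an ordinary position is to pass w = 1.0:
gl_Position = vec4(pos.xyz, 1.0);
Only pass w = 0 when you genuinely mean a direction or a point at infinity.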
Also:
I started changing values, and corners started to show up when coordinates were smaller than 0.00000000000000000005f (which i suspect would be of higher precision than a float),
That's not how floating point numbers work. The precision approaching zero is quite high: the smallest non-denormalized single-precision floating point number is 2^(-126), which is approximately 0.000000000000000000000000000000000000012, and you can get down to 2^(-149) if you go into the denormalized range.
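If you want to check those limits on your own machine, the standard library exposes them; a minimal sketch:
#include <cstdio>
#include <limits>
int main()
{
    // smallest normalized float: 2^-126, roughly 1.17549e-38
    std::printf("min normalized:   %g\n", std::numeric_limits<float>::min());
    // smallest denormalized float: 2^-149, roughly 1.4013e-45
    std::printf("min denormalized: %g\n", std::numeric_limits<float>::denorm_min());
    return 0;
}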
My computer runs Ubuntu 16.04 and is equipped with an Nvidia GeForce GT 630M graphics card with the proprietary driver installed. The glGetString(GL_VERSION) function shows that, by default, my graphics card supports OpenGL 4.5.
I have been following the Learn OpenGL tutorial series and I have the following difficulty: I can only get the tutorial's "Hello Triangle" program to run properly if I comment out the lines
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
Leaving those lines as-is will prevent the triangle from appearing.
I am having trouble understanding why setting a required OpenGL version lower than the OpenGL version my card can support would make the program fail.
EDIT: the commands
std::cout << "Renderer: " << glGetString(GL_RENDERER) << std::endl;
std::cout << "Version: " << glGetString(GL_VERSION) << std::endl;
std::cout << "Shading Language: " << glGetString(GL_SHADING_LANGUAGE_VERSION) << std::endl;
output
Renderer: GeForce GT 630M/PCIe/SSE2
Version: 4.5.0 NVIDIA 361.42
Shading Language: 4.50 NVIDIA
if those lines are commented out, and
Renderer: GeForce GT 630M/PCIe/SSE2
Version: 3.3.0 NVIDIA 361.42
Shading Language: 3.30 NVIDIA via Cg compiler
if those lines are left in place.
EDIT2: Here's the actual source code:
#include <array>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
constexpr char FRAGMENT_SHADER_SOURCE_FILE[] = "simple_fragment.shader";
constexpr char VERTEX_SHADER_SOURCE_FILE[] = "simple_vertex.shader";
constexpr int WINDOW_WIDTH = 800;
constexpr int WINDOW_HEIGHT = 800;
constexpr char WINDOW_TITLE[] = "Triangle";
constexpr std::array<GLfloat, 4> bgColour { 0.3f, 0.1f, 0.3f, 1.0f };
/*
* Instructs GLFW to close window if escape key is pressed.
*/
void keyCallback(GLFWwindow *window, int key, int scancode, int action, int mode);
int main() {
// Start GLFW.
if (not glfwInit()) {
std::cerr << "ERROR: Failed to start GLFW.\n";
return 1;
}
// Set OpenGL version.
//glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
//glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
//glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// Create window and bind to current contex.
GLFWwindow *window = glfwCreateWindow(WINDOW_WIDTH, WINDOW_HEIGHT, WINDOW_TITLE, nullptr,
nullptr);
if (not window) {
std::cerr << "ERROR: Failed to create GLFW window.\n";
glfwTerminate();
return 1;
}
glfwMakeContextCurrent(window);
// Set keyboard callback functions.
glfwSetKeyCallback(window, keyCallback);
// Initialize GLEW with experimental features turned on.
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK) {
std::cerr << "ERROR: Failed to start GLEW.\n";
glfwTerminate();
return 1;
}
// Create viewport coordinate system.
int width, height;
glfwGetFramebufferSize(window, &width, &height);
glViewport(0, 0, static_cast<GLsizei>(width), static_cast<GLsizei>(height));
// Create a vertex shader object.
GLuint vertexShader = glCreateShader(GL_VERTEX_SHADER);
// Load the vertex shader source code.
std::string vertexShaderSource;
std::ifstream vsfs(VERTEX_SHADER_SOURCE_FILE);
if (vsfs.is_open()) {
std::stringstream ss;
ss << vsfs.rdbuf();
vertexShaderSource = ss.str();
}
else {
std::cerr << "ERROR: File " << VERTEX_SHADER_SOURCE_FILE << " could not be found.\n";
glfwTerminate();
return 1;
}
// Attach the shader source code to the vertex shader object and compile.
const char *vertexShaderSource_cstr = vertexShaderSource.c_str();
glShaderSource(vertexShader, 1, &vertexShaderSource_cstr, nullptr);
glCompileShader(vertexShader);
// Check if compilation was successful.
GLint success;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
if (not success) {
std::cerr << "ERROR: Vertex shader compilation failed.\n";
glfwTerminate();
return 1;
}
// Create a fragment shader object.
GLuint fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
// Load the fragment shader source code.
std::string fragmentShaderSource;
std::ifstream fsfs(FRAGMENT_SHADER_SOURCE_FILE);
if (fsfs.is_open()) {
std::stringstream ss;
ss << fsfs.rdbuf();
fragmentShaderSource = ss.str();
}
else {
std::cerr << "ERROR: File " << FRAGMENT_SHADER_SOURCE_FILE << " could not be found.\n";
glfwTerminate();
return 1;
}
// Attach the shader source code to the fragment shader object and compile.
const char *fragmentShaderSource_cstr = fragmentShaderSource.c_str();
glShaderSource(fragmentShader, 1, &fragmentShaderSource_cstr, nullptr);
glCompileShader(fragmentShader);
// Check if compilation was successful.
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
if (not success) {
std::cerr << "ERROR: Fragment shader compilation failed.\n";
glfwTerminate();
return 1;
}
// Create a shader program by linking the vertex and fragment shaders.
GLuint shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
// Check that shader program was successfully linked.
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success);
if (not success) {
std::cerr << "ERROR: Shader program linking failed.\n";
glfwTerminate();
return 1;
}
// Delete shader objects.
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
// Coordinates of triangle vertices in Normalized Device Coordinates (NDC).
std::array<GLfloat, 9> vertices {
-0.5f, -0.5f, 0.0f,
0.5f, -0.5f, 0.0f,
0.0f, 0.5f, 0.0f
};
// Create a vertex array object.
GLuint vao;
glGenBuffers(1, &vao);
glBindVertexArray(vao);
// Create a vertex buffer object.
GLuint vbo;
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
// Pass vertex data into currently bound vertex buffer object.
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices.data(), GL_STATIC_DRAW);
// Create vertex attribute.
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(GLfloat), static_cast<GLvoid*>(0));
glEnableVertexAttribArray(0);
// Unbind the vertex array object and vertex buffer object.
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
glClearColor(bgColour[0], bgColour[1], bgColour[2], bgColour[3]);
while (not glfwWindowShouldClose(window)) {
glClear(GL_COLOR_BUFFER_BIT);
glfwPollEvents();
// Inform OpenGL to use the shader program created above.
glUseProgram(shaderProgram);
// Bind the vertex array object.
glBindVertexArray(vao);
// Draw the triangle.
glDrawArrays(GL_TRIANGLES, 0, 3);
// Unbind the vertex array object.
glBindVertexArray(0);
glfwSwapBuffers(window);
}
// Delete vertex array object.
glDeleteVertexArrays(1, &vao);
// Delete vertex buffer object.
glDeleteBuffers(1, &vbo);
// Delete shader program.
glDeleteProgram(shaderProgram);
glfwDestroyWindow(window);
glfwTerminate();
return 0;
}
void keyCallback(GLFWwindow *window, int key, int scancode, int action, int mode) {
if (key == GLFW_KEY_ESCAPE and action == GLFW_PRESS) {
glfwSetWindowShouldClose(window, GL_TRUE);
}
}
Here are the contents of simple_vertex.shader and simple_fragment.shader:
#version 330 core
layout (location = 0) in vec3 position;
void main() {
gl_Position = vec4(position.x, position.y, position.z, 1.0);
}
and
#version 330 core
out vec4 color;
void main() {
color = vec4(1.0f, 0.5f, 0.2f, 1.0f);
}
I made a typo in my code.
I used the function glGenBuffers instead of glGenVertexArrays to create my vertex array object. Apparently Nvidia accepts this, unless I specify an OpenGL version. I still find it puzzling but at least the problem is fixed.
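For anyone hitting the same thing, the corrected vertex array setup is simply:
GLuint vao;
glGenVertexArrays(1, &vao);   // was glGenBuffers(1, &vao) - the typo
glBindVertexArray(vao);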
I am getting an error when trying to use VAOs inside of SFML, and I'm not sure whether it is SFML or my own OpenGL code.
GLenum err = glewInit();
if (err != GLEW_OK)
{
std::cout << "NOT WORKING" << std::endl;
}
std::vector<sf::Vector3f> g_vertex_buffer_data;
g_vertex_buffer_data.push_back({ -1.0f, -1.0f, 0.0f });
g_vertex_buffer_data.push_back({1.0f, -1.0f, 0.0f});
g_vertex_buffer_data.push_back({ 0.0f, 1.0f, 0.0f });
const char* vertexShaderSource =
"#version 330\n\
in vec4 position;\
void main(void){\ gl_Position = position;\
}";
// compile fragment shader source
const GLchar* fragmentShaderSource =
"#version 330\n\
void main(void) {\
out vec4 fragcolor; fragcolor= vec4(1.0,1.0,1.0,1.0);\
}";
/* Creating Shader */
this->programId = glCreateProgram();
this->vId = glCreateShader(GL_VERTEX_SHADER);
this->fId = glCreateShader(GL_FRAGMENT_SHADER);
/* Get Shader Size */
int vertexShaderLength = strlen(vertexShaderSource);
int fragmentShaderLength = strlen(fragmentShaderSource);
/* Loading and binding shader */
glShaderSource(this->vId, 1, &vertexShaderSource, NULL);
glShaderSource(this->fId, 1, &fragmentShaderSource, NULL);
/* Compile Shaders */
glCompileShader(vId);
glCompileShader(fId);
/* Attach Shaders */
glAttachShader(this->programId, this->vId);
glAttachShader(this->programId, this->fId);
/* Link program */
glLinkProgram(this->programId);
/* Use and bind attribute */
glUseProgram(this->programId);
this->positionId = glGetAttribLocation(this->programId, "position");
glUseProgram(0);
/* VAO Time */
glGenVertexArrays(1, &this->vaoId);
glBindVertexArray(this->vaoId);
/* VBO Time assigning to VAO */
glGenBuffers(1, &this->vboId);
glBindBuffer(GL_ARRAY_BUFFER, this->vboId);
glBufferData(GL_ARRAY_BUFFER, g_vertex_buffer_data.size() * sizeof(sf::Vector3f), &g_vertex_buffer_data[0], GL_STATIC_DRAW);
glEnableVertexAttribArray(this->positionId);
glVertexAttribPointer(this->positionId, 2, GL_FLOAT, GL_FALSE, sizeof(sf::Vector3f), 0);
/* Close out bindings */
glBindVertexArray(0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
while(1)
{
glUseProgram(this->programId);
glBindVertexArray(this->vaoId);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);
glUseProgram(0);
gameWindow.glPushStates();
}
The error code I get is: opengl error in user code (1282)
I have changed the size() issue that was brought up in the glBufferData() call but am still getting the error.
There is at least one problem with the size that is passed to glBufferData:
glBufferData(GL_ARRAY_BUFFER,
sizeof(g_vertex_buffer_data) * sizeof(sf::Vector3f),
g_vertex_buffer_data[0], GL_STATIC_DRAW);
sizeof(g_vertex_buffer_data) is equal to sizeof(std::vector<?>), which is the size of the vector object itself and not the size of the data it contains. Try using:
glBufferData(GL_ARRAY_BUFFER,
g_vertex_buffer_data.size() * sizeof(sf::Vector3f),
&g_vertex_buffer_data[0], GL_STATIC_DRAW);
Another thing: In OpenGL 3.3 Core Profile there is no gl_FragColor variable. You will have to define an out variable.
Next: Your vertex shader seems to be empty. You have to write to gl_Position otherwise nothing will be shown.
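Putting those two points together, a minimal working shader pair for this setup could look like the following sketch, which keeps the position attribute name used above:
const char* vertexShaderSource =
    "#version 330\n"
    "in vec4 position;\n"
    "void main(void) { gl_Position = position; }\n";
const char* fragmentShaderSource =
    "#version 330\n"
    "out vec4 fragcolor;\n"                          // user-defined output instead of gl_FragColor
    "void main(void) { fragcolor = vec4(1.0, 1.0, 1.0, 1.0); }\n";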
Possible error codes for glGetAttribLocation are:
GL_INVALID_OPERATION
These constants don't have obvious numeric values, so get the error string with gluErrorString() or take a look in the header to see which of them 1282 maps to (it is 0x0502, i.e. GL_INVALID_OPERATION).
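A quick way to see what 1282 stands for is to drain the error queue and print the strings. A sketch (gluErrorString comes from GLU, so this assumes GLU is available):
#include <cstdio>
#include <GL/glu.h>   // gluErrorString; also pulls in GL/gl.h
// Prints all pending GL errors; 1282 == 0x0502 == GL_INVALID_OPERATION.
void printGLErrors(const char* where)
{
    GLenum err;
    while ((err = glGetError()) != GL_NO_ERROR)
        std::printf("GL error at %s: 0x%04x (%s)\n", where, err, (const char*)gluErrorString(err));
}
Call it right after the suspect call, e.g. printGLErrors("glGetAttribLocation").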
• Check that your shader compiled without errors.
• Check that your shader program linked without errors.
What type does positionId have? All object IDs must be of type GLuint.
And by the way, always enable shader compilation and linking error checks; debugging will be much more informative.
I do it this way (OpenGL ES 2.0):
m_nVertexShader = glCreateShader(GL_VERTEX_SHADER);
m_nPixelShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(m_nVertexShader, 1, &lpszVertexBuffer, NULL);
glShaderSource(m_nPixelShader, 1, &lpszFragmentBuffer, NULL);
glCompileShader(m_nVertexShader);
int iIsOk = 0;
glGetShaderiv(m_nVertexShader, GL_COMPILE_STATUS, &iIsOk);
if(!iIsOk)
{
GLint infoLen = 0;
glGetShaderiv(m_nVertexShader, GL_INFO_LOG_LENGTH, &infoLen);
if(infoLen > 1)
{
char* infoLog = (char*)malloc(sizeof(char) * infoLen);
glGetShaderInfoLog(m_nVertexShader, infoLen, NULL, infoLog);
QMessageBox::warning(this, QString("Error"),
QString(infoLog), QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Yes);
free(infoLog);
}
glDeleteShader(m_nVertexShader);
return;
}
glCompileShader(m_nPixelShader);
glGetShaderiv(m_nPixelShader, GL_COMPILE_STATUS, &iIsOk);
if(!iIsOk)
{
GLint infoLen = 0;
glGetShaderiv(m_nPixelShader, GL_INFO_LOG_LENGTH, &infoLen);
if(infoLen > 1)
{
char* infoLog = (char*)malloc(sizeof(char) * infoLen);
glGetShaderInfoLog(m_nPixelShader, infoLen, NULL, infoLog);
QMessageBox::warning(this, QString("Error"),
QString(infoLog), QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Yes);
free(infoLog);
}
glDeleteShader(m_nPixelShader);
return;
}
m_nProgram = glCreateProgram();
glAttachShader(m_nProgram, m_nVertexShader);
glAttachShader(m_nProgram, m_nPixelShader);
glBindAttribLocation(m_nProgram, 0, "rm_Vertex");
glLinkProgram(m_nProgram);
glGetProgramiv(m_nProgram, GL_LINK_STATUS, &iIsOk);
// Fail to pass status validation
if(!iIsOk)
{
GLint infoLen = 0;
glGetProgramiv(m_nProgram, GL_INFO_LOG_LENGTH, &infoLen);
if(infoLen > 1)
{
char* infoLog = (char*)malloc(sizeof(char) * infoLen);
glGetProgramInfoLog(m_nProgram, infoLen, NULL, infoLog);
QMessageBox::warning(this, QString("Error"),
QString(infoLog), QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Yes);
free(infoLog);
}
glDeleteProgram(m_nProgram);
return;
}
glUseProgram(m_nProgram);
As you are using GLSL 3.30, you first must specify the fragment render-target output by calling
glBindFragDataLocation(this->programId, 0, "fragcolor");
Secondly, your fragment shader must look like:
"#version 330
out vec4 fragcolor;
void main(void) {
fragcolor= vec4(1.0,1.0,1.0,1.0);
}
An example of using this kind of shader is in the OpenGL 3.3 + GLSL 1.5 Sample.
I am following the guides from this site and have stopped at lesson 2. At first I tried writing my own code, but after it didn't work I simply took the code from the site. It still doesn't draw anything besides the glClearColor.
What I have done:
Checked compiling and linking - works fine.
Checked for errors. I'm not sure I did it right, but everything seems all right (I get a 1280 error, but I've read that GLEW can cause it and that it can be ignored).
Moved glUseProgram around within the main loop but didn't get any results.
Changed colors and tried modifying the shaders. Still nothing.
I will post the code I have at the moment (the original code from the site):
main.cpp
#include <stdio.h>
#include <stdlib.h>
#include <glew.h>
#include <glfw3.h>
#include <glm/glm.hpp>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
int max(int i, int j)
{
if (i > j) return i;
return j;
}
GLuint LoadShaders(const char * vertex_file_path, const char * fragment_file_path){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
// Read the Vertex Shader code from the file
std::string VertexShaderCode;
std::ifstream VertexShaderStream(vertex_file_path, std::ios::in);
if (VertexShaderStream.is_open())
{
std::string Line = "";
while (getline(VertexShaderStream, Line))
VertexShaderCode += "\n" + Line;
VertexShaderStream.close();
}
// Read the Fragment Shader code from the file
std::string FragmentShaderCode;
std::ifstream FragmentShaderStream(fragment_file_path, std::ios::in);
if (FragmentShaderStream.is_open()){
std::string Line = "";
while (getline(FragmentShaderStream, Line))
FragmentShaderCode += "\n" + Line;
FragmentShaderStream.close();
}
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
printf("Compiling shader : %s\n", vertex_file_path);
char const * VertexSourcePointer = VertexShaderCode.c_str();
glShaderSource(VertexShaderID, 1, &VertexSourcePointer, NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> VertexShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &VertexShaderErrorMessage[0]);
// Compile Fragment Shader
printf("Compiling shader : %s\n", fragment_file_path);
char const * FragmentSourcePointer = FragmentShaderCode.c_str();
glShaderSource(FragmentShaderID, 1, &FragmentSourcePointer, NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> FragmentShaderErrorMessage(InfoLogLength);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
fprintf(stdout, "%s\n", &FragmentShaderErrorMessage[0]);
// Link the program
fprintf(stdout, "Linking program\n\n");
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
std::vector<char> ProgramErrorMessage(max(InfoLogLength, int(1)));
std::cout << "Checking program\n";
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
fprintf(stdout, "%s\n", &ProgramErrorMessage[0]);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
std::cout << "END";
return ProgramID;
}
int main(void)
{
if (!glfwInit())
{
std::cout << "Cannot init glfw";
return -1;
}
//glfwWindowHint(GLFW_SAMPLES, 4); // 4x antialiasing
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); // We want OpenGL 3.3
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); //We don't want the old OpenGL
// Open a window and create its OpenGL context
GLFWwindow* window; // (In the accompanying source code, this variable is global)
window = glfwCreateWindow(1024, 768, "Tutorial 01", NULL, NULL);
if (window == NULL){
fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window); // Initialize GLEW
glewExperimental = GL_TRUE; // Needed in core profile
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
static const GLfloat g_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
glClearColor(0.0f, 0.0f, 0.4f, 0.0f);
GLuint programID = LoadShaders("res\\vertex.glsl", "res\\fragment.glsl");
do{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glUseProgram(programID);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle
glDisableVertexAttribArray(0);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
return 0;
}
fragment.glsl
#version 330 core
out vec3 color;
void main(){
color = vec3(1,1,0);
}
vertex.glsl
#version 330 core
layout(location = 0) in vec3 vertexPosition_modelspace;
void main(){
gl_Position.xyz = vertexPosition_modelspace;
gl_Position.w = 1.0;
}
The OpenGL Core Profile requires the use of Vertex Array Objects (VAOs). This is in the "Deprecated and Removed Features" of the spec:
Client vertex and index arrays - all vertex array attribute and element array index pointers must refer to buffer objects. The default vertex array object (the name zero) is also deprecated. Calling VertexAttribPointer when no buffer object or no vertex array object is bound will generate an INVALID_OPERATION error, as will calling any array drawing command when no vertex array object is bound.
The tutorial you are using suggests to use this code as part of your initialization:
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
This will be enough to get the tutorial code working. To make productive use of VAOs in more complex applications, you will probably want to create a VAO for each object. This will then track the full vertex setup state for the object, and allow you to set the state with a single glBindVertexArray() call before drawing.
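A minimal sketch of that per-object pattern, using the vertex buffer from the tutorial code above (the names are placeholders):
// setup, once per object
GLuint vao, vbo;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(0);
glBindVertexArray(0);            // the VAO now remembers the buffer binding and attribute layout
// drawing, once per frame
glBindVertexArray(vao);
glDrawArrays(GL_TRIANGLES, 0, 3);
glBindVertexArray(0);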
Edit II:
Current code works great! Thanks, everyone. I went ahead and included my shader code for reference at the bottom, though it does absolutely nothing at this point really.
I am trying to get up and running with OpenGL 4.1 and am still very early in development. Currently I'm not even really using 4.0 features in this project yet, so this is just as much an OpenGL 3 question.
The goal I was working on first was simply working out two classes to handle VAOs and VBOs. I had some misconceptions but finally got past the blank screen.
/* THIS CODE IS NOW FULLY FUNCTIONAL */
/* well, fully is questionable lol, should work out of the box with glew and glfw */
/* A simple function that will read a file into an allocated char pointer buffer */
/* Borrowed from OpenGL.org tutorial */
char* filePull(char *file)
{
FILE *fptr;
long length;
char *buf;
fptr = fopen(file, "r"); /* Open file for reading */
if (!fptr) /* Return NULL on failure */
return NULL;
fseek(fptr, 0, SEEK_END); /* Seek to the end of the file */
length = ftell(fptr); /* Find out how many bytes into the file we are */
buf = (char*)malloc(length+1); /* Allocate a buffer for the entire length of the file and a null terminator */
fseek(fptr, 0, SEEK_SET); /* Go back to the beginning of the file */
fread(buf, length, 1, fptr); /* Read the contents of the file in to the buffer */
fclose(fptr); /* Close the file */
buf[length] = 0; /* Null terminator */
return buf; /* Return the buffer */
}
class VBO
{
public:
GLuint buffer;
bool isBound;
vector<void*> belongTo;
vector<GLfloat> vertex;
GLenum usage;
void Load()
{ glBufferData(GL_ARRAY_BUFFER, vertex.size()*sizeof(GLfloat), &vertex[0], usage); }
void Create(void* parent)
{
glGenBuffers(1, &buffer);
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glBufferData(GL_ARRAY_BUFFER, vertex.size()*sizeof(GLfloat), &vertex[0], usage);
isBound=true;
belongTo.push_back(parent);
}
void Activate()
{
if(!isBound) glBindBuffer(GL_ARRAY_BUFFER, buffer);
isBound=true;
}
void Deactivate(){ glBindBuffer(GL_ARRAY_BUFFER, 0); }
VBO() : isBound(false), usage(GL_STATIC_DRAW)
{ }
~VBO() { }
private:
};
class VAO
{
public:
GLuint buffer;
string key;
unsigned long long cursor;
vector<VBO> child;
void Create()
{
glGenVertexArrays(1, &buffer);
for(unsigned int i=0; i<child.size(); i++)
child[i].Create(this);
}
void Activate()
{
glBindVertexArray(buffer);
for(unsigned int i=0; i<child.size(); i++)
child[i].Activate();
}
void Release(){ glBindVertexArray(0); }
void Remove(){ glDeleteVertexArrays(1, &buffer); }
VAO() : buffer(1) { }
~VAO() { }
private:
};
int main()
{
int width=640, height=480, frame=1; bool running = true;
glfwInit();
if( !glfwOpenWindow( width, height, 0, 0, 0, 0, 0, 0, GLFW_WINDOW ) )
{ glfwTerminate(); return 13; }
glfwSetWindowTitle("Genesis");
glewInit();
cout<<(GLEW_VERSION_4_1?"yes":"no"); //yes
GLchar *vsource, *fsource;
GLuint _vs, _fs;
GLuint Shader;
vsource = filePull("base.vert");
fsource = filePull("base.frag");
/* Compile Shaders */
_vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(_vs, 1, (const GLchar**)&vsource, 0);
glCompileShader(_vs);
// glGetShaderiv(_vs, GL_COMPILE_STATUS, &IsCompiled_VS);
_fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(_fs, 1, (const GLchar**)&fsource, 0);
glCompileShader(_fs);
/***************** ^ Vertex | Fragment v *********************/
glAttachShader(Shader, _vs);
glAttachShader(Shader, _fs);
// glGetShaderiv(_fs, GL_COMPILE_STATUS, &IsCompiled_FS);
glBindAttribLocation(Shader, 0, "posIn");
glLinkProgram(Shader);
// glGetProgramiv(shaderprogram, GL_LINK_STATUS, (int *)&IsLinked);
VAO Object3D;
VBO myVBO[3];
glUseProgram(Shader);
for(int i=0; i<9; i++)
myVBO[0].vertex.push_back((i%9)*.11); //Arbitrary vertex values
Object3D.child.push_back(myVBO[0]);
Object3D.Create();
glClearColor( 0.7f, 0.74f, 0.77f, 0.0f ); //Black got lonely
int i=0; while(running)
{
frame++;
glfwGetWindowSize( &width, &height );
height = height > 0 ? height : 1;
glViewport( 0, 0, width, height );
glClear( GL_COLOR_BUFFER_BIT );
/* Bind, Draw, Unbind */
Object3D.Activate();
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, false, 0, 0);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 9);
Object3D.Release();
glfwSwapBuffers();
// exit if ESC was pressed or window was closed
running = !glfwGetKey(GLFW_KEY_ESC) && glfwGetWindowParam( GLFW_OPENED);
i++;
}
glUseProgram(0); glDisableVertexAttribArray(0);
glDetachShader(Shader, _vs); glDetachShader(Shader, _fs);
glDeleteProgram(Shader); glDeleteShader(_vs); glDeleteShader(_fs);
glDeleteVertexArrays(1, &Object3D.buffer);
glfwTerminate();
return 0;
}
Basically I'm just hoping to get anything on the screen at this point. I am using GLFW and GLEW. Am I completely leaving something out, or do I just need to correct something? The code is somewhat mangled at the moment, sorry.
base.vert
// Vertex Shader – file "base.vert"
#version 300
in vec3 posIn;
out vec4 colorOut;
void main(void)
{
gl_Position = vec4(posIn, 1.0);
colorOut = vec4(3.0,6.0,4.0,1.0);
}
base.frag
// Fragment Shader – file "base.frag"
#version 300
out vec3 colorOut;
void main(void)
{
colorOut = vec3(1.0,10,1.0);
}
&vertex
vertex is a vector. Taking its address will not give you a pointer to the data.
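To hand the actual data to the GL, take the address of the first element (or use data()), e.g.:
glBufferData(GL_ARRAY_BUFFER, vertex.size() * sizeof(GLfloat), vertex.data(), usage);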
Edit to add:
Right. It still does not work, because you have at least 2 more issues:
You don't make any gl*Pointer call, so the GL won't know what it needs to pull from your vertex buffer objects (a sketch of the missing call follows below).
The vertex data you put in your vertex array is the same vertex 3 times - a triangle with all 3 points at the same location:
for(int i=0; i<9; i++)
myVBO[0].vertex.push_back((i%3)*.2); //Arbitrary vertex values
This creates three (.0 .2 .4) vectors, all at the same location.
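For the first point, the attribute layout has to be described before drawing; a sketch matching the posIn attribute bound to location 0 above:
glBindBuffer(GL_ARRAY_BUFFER, buffer);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);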
That isBound member of VBO looks suspicious. The OpenGL binding state may change, for example after switching the bound VAO, while the VBO class instance still thinks it's active. Just drop isBound altogether and re-bind every time you need the object; with modern drivers, re-binding an already bound object is almost free.
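A trimmed version of the two methods without the cached flag could look like:
void Activate()   { glBindBuffer(GL_ARRAY_BUFFER, buffer); }   // just re-bind every time
void Deactivate() { glBindBuffer(GL_ARRAY_BUFFER, 0); }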