I have read some tutorials to write the following code. The only difference is that the original tutorials were using SDL instead of GLFW.
I do not understand what is wrong in this code. It compiles, but I do not see the triangle. (The tutorials were not using shaders either.)
#include <iostream>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GLFW/glfw3.h>
GLFWwindow* window;
int main(int argc, const char * argv[])
{
if (!glfwInit())
{
return -1;
}
glfwWindowHint(GLFW_SAMPLES, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
window = glfwCreateWindow(640, 480, "Test", NULL, NULL);
if (window==NULL)
{
return -1;
}
glfwMakeContextCurrent(window);
glewExperimental = true;
if (glewInit() != GLEW_OK)
{
return -1;
}
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
glClearColor(0.0f, 1.0f, 1.0f, 1.0f);
do
{
glfwPollEvents();
float vertices[] = {-0.5, -0.5, 0.0, 0.5, 0.5, -0.5};
glClear(GL_COLOR_BUFFER_BIT);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, vertices);
glEnableVertexAttribArray(0);
glDrawArrays(GL_TRIANGLES, 0, 3);
glDisableVertexAttribArray(0);
glfwSwapBuffers(window);
}
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS && glfwWindowShouldClose(window) == 0);
glfwTerminate();
return 0;
}
If you're using the fixed-function pipeline, you cannot use generic vertex attributes (the ones set with glVertexAttribPointer).
NVIDIA's implementation, however, illegally aliases between generic attributes and non-generic ones. This is probably why the initial writer of the tutorial got away with it on their machine.
If you want to write this in a cross-platform way, you have to use glVertexPointer and glEnableClientState:
glVertexPointer(2, GL_FLOAT, 0, vertices);
glEnableClientState(GL_VERTEX_ARRAY);
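Putting it together, the draw loop from the question might look like this with the fixed-function calls substituted in. This is a minimal sketch, assuming the created context actually exposes the compatibility (fixed-function) pipeline, which the 3.3 version hints without a profile request may or may not give you:
do
{
    glfwPollEvents();
    glClear(GL_COLOR_BUFFER_BIT);

    // Client-side vertex array through the fixed-function pipeline
    glVertexPointer(2, GL_FLOAT, 0, vertices);
    glEnableClientState(GL_VERTEX_ARRAY);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    glDisableClientState(GL_VERTEX_ARRAY);

    glfwSwapBuffers(window);
}
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS && glfwWindowShouldClose(window) == 0);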
I am trying to make a triangle, and I'm using GLEW, GLFW, and GLM alongside OpenGL.
Here's the code I have:
#include <stdlib.h>
#include <stdio.h>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
using namespace glm;
int main()
{
if (!glfwInit())
{
fprintf(stderr, "Failed to initialize GLFW\n");
return -1;
}
glfwWindowHint(GLFW_SAMPLES, 4); // 4x antialiasing
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); // We want OpenGL 3.3
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // To make MacOS happy; should not be needed
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); //We don't want the old OpenGL
// Open a window and create its OpenGL context
GLFWwindow* window; // (In the accompanying source code, this variable is global)
window = glfwCreateWindow(1024, 768, "Tutorial 01", NULL, NULL);
if (window == NULL)
{
fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
// Initialize GLEW
glewExperimental = true; // Needed in core profile
if (glewInit() != GLEW_OK)
{
fprintf(stderr, "Failed to initialize GLEW\n");
return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// An array of 3 vectors which represents 3 vertices
static const GLfloat g_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f,
};
// This will identify our vertex buffer
GLuint vertexbuffer;
// Generate 1 buffer, put the resulting identifier in vertexbuffer
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
do {
// 1st attribute buffer: vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
// Draw the triangle !
glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle
glDisableVertexAttribArray(0);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
}
Edit
The code is not displaying the white triangle; it just opens the window and shows an empty black screen.
Also, at first I had posted the code I was messing around with, which was missing part of the code. The complete version above still does not produce the white triangle.
Do I need to use a shader for OpenGL 3.3?
You need to use a shader or you won't see anything. If you do see something, that is unspecified behavior, which depends highly on the individual driver.
You could try fiddling with the clear color and clear the screen:
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
The tutorial was able to get a white triangle on a black background. On my computer the opposite was the case.
The tutorial also points out:
If you’re on lucky, you can see the result (don’t panic if you don’t)
So read the rest of the tutorial, it also includes a shader.
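For reference, a minimal sketch of the kind of shader setup the tutorial goes on to add (the GLSL strings and variable names here are illustrative, not the tutorial's exact code); run this once before the loop, and the program stays bound for glDrawArrays:
const char* vs_src =
    "#version 330 core\n"
    "layout(location = 0) in vec3 pos;\n"
    "void main() { gl_Position = vec4(pos, 1.0); }";
const char* fs_src =
    "#version 330 core\n"
    "out vec4 color;\n"
    "void main() { color = vec4(1.0); }"; // plain white

GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vs_src, NULL);
glCompileShader(vs); // check GL_COMPILE_STATUS in real code
GLuint fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fs_src, NULL);
glCompileShader(fs);
GLuint program = glCreateProgram();
glAttachShader(program, vs);
glAttachShader(program, fs);
glLinkProgram(program); // check GL_LINK_STATUS in real code
glUseProgram(program);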
I am working through a book on OpenGL, and I am stuck on the third chapter. The following code should open a window and draw a single dot in the center.
The window shows up, and glClearColor and glClear seem to be working, but there is no dot. This answer leads me to believe I may need to compile and link simple vertex and fragment shaders.
The book has made no mention of them, but do I need shaders? Am I missing something else with how I am drawing the dot? Something with the window? The book uses some Win32 thing for windowing; I am using Linux Mint 17.1.
Code below:
#include <stdlib.h>
#include <stdio.h>
#include <GL/glew.h>
#include <GL/gl.h>
#include <GLFW/glfw3.h>
#include <GL/glut.h>
#define BUFFER_OFFSET(i) ( (char*)NULL + (i) )
static void error_callback(int error, const char* description) {
fputs(description, stderr);
}
static void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods) {
if (key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
int main(void) {
GLFWwindow* window;
glfwSetErrorCallback(error_callback);
if (!glfwInit()) {
exit(EXIT_FAILURE);
}
window = glfwCreateWindow(640, 480, "Simple example", NULL, NULL);
if (!window) {
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwMakeContextCurrent(window);
glfwSwapInterval(1);
glfwSetKeyCallback(window, key_callback);
if (glewInit()) {
glfwTerminate();
exit(EXIT_FAILURE);
}
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
float ratio;
int width, height;
glfwGetFramebufferSize(window, &width, &height);
ratio = width / (float) height;
glViewport(0, 0, width, height);
GLfloat vertex [] = {0.0f, 0.0f, -2.0f};
GLuint m_vertexBuffer;
glGenBuffers(1, &m_vertexBuffer);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(GLfloat) * 3, &vertex[0], GL_STATIC_DRAW);
while (!glfwWindowShouldClose(window)) {
glClear(GL_COLOR_BUFFER_BIT);
glBindBuffer(GL_ARRAY_BUFFER, m_vertexBuffer);
glVertexPointer(3, GL_FLOAT, 0, BUFFER_OFFSET(0));
glPointSize(10.0f);
glEnableClientState(GL_VERTEX_ARRAY);
glDrawArrays(GL_POINTS, 0, 1);
glDisableClientState(GL_VERTEX_ARRAY);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
Also, I am compiling with g++ main.cpp -lGL -lglfw -lGLU -lGLEW
I have OpenGL 4.5 installed, but am opening a fixed-pipeline context.
From your code:
GLfloat vertex [] = {0.0f, 0.0f, -2.0f};
Since you use the fixed-function pipeline and never set any matrices, both the projection and the modelview matrix will be left at their default, the identity. The viewing volume is then just the clip-space cube from -1 to 1 on each axis, and the point (0, 0, -2) is outside of it. Try (0, 0, 0) instead.
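Alternatively, if you want to keep the vertex at z = -2, you can enlarge the viewing volume instead of moving the point. A minimal sketch, assuming a compatibility context where the fixed-function matrix stack is available:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, 0.0, 4.0); // viewing volume now spans z = 0 down to z = -4
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();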
If the book uses exactly this code with exactly those vertex coordinates, you'd better search for another one. You should also consider learning "modern" GL instead; the fixed-function pipeline was superseded by shaders in GL 2.0 a decade ago.
I have a problem seeing my triangle in a black window. The window just closes as soon as it opens and does not let me see what's going on inside it. I have seen somewhere on the net that it has something to do with the minor version, which I have no clue how to check on my VGA card.
Here is my full code:
#define GLEW_STATIC
#include <stdio.h>
#include <GL/glew.h>
#include <GL/GLU.h>
#include <GL/glut.h>
#include <glm.hpp>
#include <GL/gl.h>
#include <GLFW/glfw3.h>
using namespace glm;
using namespace std;
int main()
{
glfwWindowHint(GLFW_SAMPLES, 4); // anti aliasing
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); // openGL major version to be 3
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); // minor set to 0, so this actually requests version 3.0, not 3.3
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // for MAC OS only
glfwWindowHint(GLFW_OPENGL_COMPAT_PROFILE, GLFW_OPENGL_CORE_PROFILE); //avoid using old openGL
GLFWwindow* window;
window = glfwCreateWindow(1024, 768, "First Window in OpenGL", NULL, NULL);
if (window == NULL)
{
fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
static const GLfloat g_vertex_buffer_data[] = {
-1.0f, -1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
0.0f, 1.0f, 0.0f
};
// identifying our vertex buffer
GLuint vertexbuffer;
glGenBuffers(1, &vertexbuffer);
// The following commands will talk about our 'vertexbuffer' buffer
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
// Give our vertices to OpenGL.
glBufferData(GL_ARRAY_BUFFER, sizeof(g_vertex_buffer_data), g_vertex_buffer_data, GL_STATIC_DRAW);
glewExperimental = true; // Needed in core profile
if (glewInit() != GLEW_OK)
{
fprintf(stderr, "Failed to initialize GLEW\n");
return -1;
}
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
do{
// 1st attribute buffer: vertices
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 3); // Starting from vertex 0; 3 vertices total -> 1 triangle
glDisableVertexAttribArray(0);
glfwSwapBuffers(window);
glfwPollEvents();
}
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
return 0;
}
Can anyone please help me keep the window open, so that it closes only with the ESC key, as the code is supposed to do?
The reason the window closes immediately is that you have a segmentation fault.
This is most likely due to failing to initialize things in the proper order.
Initialize GLFW before calling any other GLFW function, like so:
// Initialise GLFW
if( !glfwInit() ) {
fprintf( stderr, "Failed to initialize GLFW\n" );
return -1;
}
glfwWindowHint(GLFW_SAMPLES, 4); // anti aliasing
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3); // openGL major version to be 3
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0); // minor set to 0, so this actually requests version 3.0, not 3.3
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); // for MAC OS only
glfwWindowHint(GLFW_OPENGL_COMPAT_PROFILE, GLFW_OPENGL_CORE_PROFILE); //avoid using old openGL
GLFWwindow* window;
window = glfwCreateWindow(1024, 768, "First Window in OpenGL", NULL, NULL);
Then, move the initialization of GLEW to right after you create the window and make its GL context current:
if (window == NULL) {
fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, they are not 3.3 compatible. Try the 2.1 version of the tutorials.\n");
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE; // Needed in core profile
const GLenum err = glewInit();
if (err != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
fprintf(stderr, "Error: %s\n", glewGetErrorString(err));
return -1;
}
GLuint VertexArrayID;
glGenVertexArrays(1, &VertexArrayID);
glBindVertexArray(VertexArrayID);
// ... rest of code follows
I have tested this; your code should work fine now, and the window won't close until you hit ESC.
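In short, the order everything has to happen in (a recap of the corrected code above, nothing new):
if (!glfwInit())                  // 1. initialize GLFW before any other GLFW call
    return -1;
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 0);
GLFWwindow* window = glfwCreateWindow(1024, 768, "First Window in OpenGL", NULL, NULL);
if (window == NULL) { glfwTerminate(); return -1; }
glfwMakeContextCurrent(window);   // 2. make the context current...
glewExperimental = GL_TRUE;
if (glewInit() != GLEW_OK)        // 3. ...before initializing GLEW
    return -1;
GLuint VertexArrayID;             // 4. only now is it safe to call into OpenGL itself
glGenVertexArrays(1, &VertexArrayID);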
When I try to run this simple OpenGL test program I get a segmentation fault. This only happens when I create the context using the core profile flag. If I use the compatibility profile flag, the program runs without issue.
Edit: I checked the pointer to the function glGenVertexArrays, and it is NULL. If glfwCreateWindow doesn't return NULL, glGetString(GL_VERSION) confirms that the context is version 4.3, and glewInit returns GLEW_OK, then why is glGenVertexArrays == NULL?
My OS is Windows 7 64-bit and my GPU is a Nvidia GTX 760 with 331.82 WHQL driver.
Code:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <stdlib.h>
#include <stdio.h>
#define GLSL(src) "#version 430 core\n" #src
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if(key == GLFW_KEY_ESCAPE && action == GLFW_PRESS)
glfwSetWindowShouldClose(window, GL_TRUE);
}
GLuint create_program(const char* vertex_source, const char* fragment_source)
{
GLuint vs = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vs, 1, &vertex_source, NULL);
glCompileShader(vs);
unsigned int fs = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fs, 1, &fragment_source, NULL);
glCompileShader(fs);
GLuint shader_program = glCreateProgram();
glAttachShader(shader_program, fs);
glAttachShader(shader_program, vs);
glLinkProgram(shader_program);
return shader_program;
}
const char* vertex_shader = GLSL(
layout(location = 0) in vec3 vertex_position;
void main()
{
gl_Position = vec4(vertex_position, 1.0);
}
);
const char* fragment_shader = GLSL(
out vec4 frag_color;
void main()
{
frag_color = vec4(1.0, 0.0, 0.0, 1.0);
}
);
int main(int argc, char* argv[])
{
if(!glfwInit())
exit(EXIT_FAILURE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
//if we set GLFW_OPENGL_PROFILE to GLFW_OPENGL_CORE_PROFILE
//instead of GLFW_OPENGL_COMPAT_PROFILE the program will
//segfault at line 98, call to glGenVertexArrays
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow* window = glfwCreateWindow(512, 512, "OpenGL", NULL, NULL);
if(!window)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
glfwSetKeyCallback(window, key_callback);
glfwMakeContextCurrent(window);
GLenum glewError = glewInit();
if(glewError != GLEW_OK)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
printf("OpenGL Version: %s\n\n", glGetString(GL_VERSION));
float position[] =
{
1.0f, 1.0f, 0.0f,
-1.0f, 1.0f, 0.0f,
1.0f, -1.0f, 0.0f,
-1.0f, -1.0f, 0.0f
};
unsigned short indices[] =
{
1, 0, 2,
3, 1, 2
};
GLuint vao = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
GLuint index_buffer = 0;
GLuint vertex_buffer = 0;
glGenBuffers(1, &index_buffer);
glGenBuffers(1, &vertex_buffer);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, index_buffer);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), &indices, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, vertex_buffer);
glBufferData(GL_ARRAY_BUFFER, sizeof(position), &position, GL_STATIC_DRAW);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0);
glEnableVertexAttribArray(0);
GLuint shader_program = create_program(vertex_shader, fragment_shader);
glUseProgram(shader_program);
while(!glfwWindowShouldClose(window))
{
glClear(GL_COLOR_BUFFER_BIT);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, NULL);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
You're actually getting an Invalid Enum error [1280] from OpenGL after you call glewInit(). The easiest fix is to set
glewExperimental = GL_TRUE;
before you call glewInit(), like so:
glewExperimental = GL_TRUE;
GLenum glewError = glewInit();
if (glewError != GLEW_OK)
{
glfwTerminate();
exit(EXIT_FAILURE);
}
Why? It has to do with how GLEW loads extensions and function pointers: by default, GLEW marks some functions and extensions as unsupported. To get around that, you need to set glewExperimental = GL_TRUE;, or else it will generate an error like the one you were getting.
Experimental Drivers
GLEW obtains information on the supported extensions from the graphics driver. Experimental or pre-release drivers, however, might not report every available extension through the standard mechanism, in which case GLEW will report it unsupported. To circumvent this situation, the glewExperimental global switch can be turned on by setting it to GL_TRUE before calling glewInit(), which ensures that all extensions with valid entry points will be exposed.
Source
Extra
Always remember to check for OpenGL errors; they usually tell you what's wrong and help you find the problem.
GLenum error = glGetError();
if (error != GL_NO_ERROR)
{
std::cout << "OpenGL Error: " << error << std::endl;
}
You can read about the different errors here.
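If the raw numbers are hard to remember, a small helper can translate the common codes into readable names. A sketch (the function name gl_error_name is made up here, not part of any library):
// Illustrative helper: map the common OpenGL error codes to readable names.
const char* gl_error_name(GLenum error)
{
    switch (error)
    {
        case GL_INVALID_ENUM:      return "GL_INVALID_ENUM";      // 1280
        case GL_INVALID_VALUE:     return "GL_INVALID_VALUE";     // 1281
        case GL_INVALID_OPERATION: return "GL_INVALID_OPERATION"; // 1282
        case GL_OUT_OF_MEMORY:     return "GL_OUT_OF_MEMORY";     // 1285
        default:                   return "unknown error";
    }
}
Then the check above becomes std::cout << "OpenGL Error: " << gl_error_name(error) << std::endl;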
Output the value of glGenVertexArrays. It is likely 0 or NULL right now. I've found I have to set up certain functions first, even in the core profile.
I've encountered similar issues with GLEW not properly initializing all the function pointers when you create a core profile. I've meant to look into the GLEW implementation to see why this is the case but haven't gotten around to it, since the compatibility context is serving me OK.
However, one possibility you could try is to use GLFW to create a compatibility context, initialize GLEW, destroy the GLFW window and create a new one with the core context.
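Sketched out, that workaround might look roughly like this (untested; it assumes the function pointers GLEW loads against the throwaway context remain valid for the second one, which is typically the case on Windows):
// 1. A throwaway window with a default (compatibility-friendly) context, only to feed GLEW.
glfwDefaultWindowHints();
GLFWwindow* dummy = glfwCreateWindow(64, 64, "loader", NULL, NULL);
glfwMakeContextCurrent(dummy);
glewExperimental = GL_TRUE;
glewInit();
glfwDestroyWindow(dummy);

// 2. The real core-profile window, created after GLEW has loaded its function pointers.
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 3);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
GLFWwindow* window = glfwCreateWindow(512, 512, "OpenGL", NULL, NULL);
glfwMakeContextCurrent(window);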
I have the following program, and I cannot figure out why the glColor4f() call is not resulting in a red line from the output of glDrawArrays(). Can anyone tell me what I'm doing wrong? A white line is being output for some reason. Is glColor4f() no longer supported in modern versions of GL? If that's the case, what's the proper way to color lines like this?
All of my googling led me to find that I should be calling:
glDisable(GL_LIGHTING);
glDisable(GL_TEXTURE_2D);
glEnable(GL_COLOR_MATERIAL);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
However, after adding those calls I don't see any difference.
#include <vector>
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <glm/glm.hpp>
int main(int argc, char *argv[]) {
if(!glfwInit()) {
return EXIT_FAILURE;
}
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
GLFWwindow* window = glfwCreateWindow(800, 600, "Line Intersection", NULL, NULL);
if(!window) {
glfwTerminate();
return EXIT_FAILURE;
}
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE;
if(glewInit() != GLEW_OK) {
glfwTerminate();
return EXIT_FAILURE;
}
if(!GLEW_VERSION_3_2) {
glfwTerminate();
return EXIT_FAILURE;
}
static const glm::vec3 arr[] = {
glm::vec3(0,0,0),
glm::vec3(0,0.8f,0)
};
const glm::vec3* first = arr;
const glm::vec3* last = arr + sizeof(arr) / sizeof(arr[0]);
std::vector<glm::vec3> vertexBufferData (first, last);
static GLuint vertexBufferId;
glGenBuffers(1, &vertexBufferId);
glBindBuffer(GL_ARRAY_BUFFER, vertexBufferId);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertexBufferData[0]) * vertexBufferData.size(), &vertexBufferData[0], GL_STATIC_DRAW);
GLuint vertexArrayId;
glGenVertexArrays(1, &vertexArrayId);
glBindVertexArray(vertexArrayId);
glDisable(GL_LIGHTING);
glDisable(GL_TEXTURE_2D);
glEnable(GL_COLOR_MATERIAL);
glColorMaterial(GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE);
while(!glfwWindowShouldClose(window)){
glClearColor(0, 0, 0, 1);
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glColor4f(1.0f,0.0f,0.0f,1.0f);
glDrawArrays(GL_LINES, 0, vertexBufferData.size());
glDisableVertexAttribArray(0);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwTerminate();
return EXIT_SUCCESS;
}
You've selected a Core profile.
There are no freebies in Core: you must provide a vertex shader and a fragment shader, and glColor4f is part of the fixed-function state that was removed.
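In a core profile, the usual replacement for glColor4f is either a per-vertex color attribute or a uniform. A minimal sketch using a uniform (the shader strings and the name lineColor are illustrative); GLSL 150 matches the 3.2 context requested above:
const char* vs_src =
    "#version 150 core\n"
    "in vec3 position;\n"
    "void main() { gl_Position = vec4(position, 1.0); }";
const char* fs_src =
    "#version 150 core\n"
    "uniform vec4 lineColor;\n"
    "out vec4 fragColor;\n"
    "void main() { fragColor = lineColor; }";

// Compile both shaders, call glBindAttribLocation(program, 0, "position")
// so attribute 0 matches glVertexAttribPointer(0, ...), then link.
// In the render loop, instead of glColor4f:
glUseProgram(program);
glUniform4f(glGetUniformLocation(program, "lineColor"), 1.0f, 0.0f, 0.0f, 1.0f); // red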