OpenGL triangle not appearing - C++

Hello everyone, I'm trying to learn OpenGL using shaders, so I made a triangle that changes its color while it moves. For some reason the triangle does not appear in the window, just a black background. It doesn't crash or show any kind of error, and I believe the library setup is fine since I've used these libraries before.
I'm using CLion on Ubuntu 16.04. Here is my code; hopefully you can check it out and give me a hand. Thanks!
#define GLEW_STATIC
#include <stdlib.h>
#include <stdio.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <iostream>
int CurrentWidth = 800,
CurrentHeight = 600,
WindowHandle = 0;
GLuint
VertexShaderId,
FragmentShaderId,
ProgramId;
GLuint s_vertexLoc, s_colorLoc , s_factorLoc;
GLfloat runner = 0.0;
const GLchar* VertexShader =
{
"#version 150\n"
"attribute vec3 in_vertex;"
"attribute vec3 in_color;"
"uniform float factor;"
"varying vec3 intp_color;"
"void main(void)"
"{"
"intp_color = in_color;"
"gl_Position = vec4( in_vertex , 1.0 ) ;"
"gl_Position.y += factor; "
"}"
};
//The fragment shader outputs the interpolated color, scaled by the factor uniform
const GLchar* FragmentShader =
{
"#version 150\n"
"uniform float factor;"
"varying vec3 intp_color;"
"void main(void){"
"gl_FragColor = vec4( intp_color , 1.0) * factor;"
"}"
};
// forward declarations
void ResizeFunction(int, int);
void RenderFunction(void);
void IdleFunction(void);
void CreateShaders(void);
// set up an array for the geometry of the object
GLfloat Vertices[] = {
-0.5f , -0.2f , 0.0f, // point A - x , y , z
0.5f , -0.2f , 0.0f, // point B - x , y , z
0.0f , 0.8f , 0.0f // point C - x , y , z
};
GLfloat Colors[] = {
1.0f , 0.0f , 0.0f, // point A - r , g , b
0.0f , 1.0f , 0.0f, // point B - r , g , b
0.0f , 0.0f , 1.0f // point C - r , g , b
};
int main( int argc , char* argv[] )
{
glutInit( &argc , argv);
//Target version 3.1
glutInitContextVersion(3 , 1 );
glutInitWindowSize(CurrentWidth, CurrentHeight);
glutInitDisplayMode( GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA );
WindowHandle = glutCreateWindow( "OpenGL - Shader example" );
glutReshapeFunc( ResizeFunction );
glutDisplayFunc( RenderFunction );
glutIdleFunc( IdleFunction );
/// init GLEW
GLenum GlewInitResult;
GlewInitResult = glewInit();
if (GLEW_OK != GlewInitResult)
exit(EXIT_FAILURE);
/// Create our shaders
CreateShaders();
glutMainLoop();
exit(EXIT_SUCCESS);
}
void ResizeFunction(int Width, int Height)
{
CurrentWidth = Width;
CurrentHeight = Height;
glViewport( 0, 0, CurrentWidth, CurrentHeight );
glClearColor( 0.0f, 0.0f, 0.0f, 0.0f );
}
void RenderFunction(void)
{
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
//We enable the shader variable
glEnableVertexAttribArray( s_vertexLoc );
glEnableVertexAttribArray( s_colorLoc );
//How to send data to the variable:
//( Where to send the data , how its grouped , data type , dont normalize the data,
// there is no offset, you find it here)
glVertexAttribPointer( s_vertexLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Vertices );
glVertexAttribPointer ( s_colorLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Colors);
glUniform1f( s_factorLoc , runner );
runner += 0.01;
if(runner > 1.0f)
runner = -1.0f;
glDrawArrays( GL_TRIANGLES , 0 , 3 );
glDisableVertexAttribArray( s_vertexLoc );
glDisableVertexAttribArray( s_colorLoc );
glutSwapBuffers();
}
void IdleFunction(void)
{
glutPostRedisplay();
}
//Error checking
void printLog(GLuint obj)
{
int infologLength = 0;
int maxLength;
if( glIsShader( obj ) )
glGetShaderiv( obj , GL_INFO_LOG_LENGTH , &maxLength );
else
glGetProgramiv( obj, GL_INFO_LOG_LENGTH, &maxLength);
char infoLog[1255];
if ( glIsShader(obj) )
glGetShaderInfoLog( obj, maxLength, &infologLength, infoLog );
else
glGetProgramInfoLog( obj, maxLength, &infologLength, infoLog );
if ( infologLength > 0 )
printf( "\n Error detail: %s\n" , infoLog );
}
void CreateShaders(void)
{
GLenum ErrorCheckValue = glGetError();
if( glCreateShader )
printf(" ---- shader suppot ok ---");
else
{
printf(" ---- no shader support ---");
return ;
}
///The VERTEX shader is created (tell it what it is)
VertexShaderId = glCreateShader( GL_VERTEX_SHADER );
//(shader we want to set the source, how many are they, the source of the shader , )
glShaderSource( VertexShaderId , 1 , &VertexShader , nullptr );
//Compile the shader
glCompileShader( VertexShaderId );
//Error checking
printLog( VertexShaderId );
///The FRAGMENT shader is created (tell it what it is)
FragmentShaderId = glCreateShader( GL_FRAGMENT_SHADER );
//(shader we want to set the source, how many are they, the source of the shader , )
glShaderSource( FragmentShaderId , 1 , &FragmentShader , nullptr );
//Compile the shader
glCompileShader( FragmentShaderId );
//Error checking
printLog( FragmentShaderId );
///Program Object links both shaders
ProgramId = glCreateProgram();
//Attach the compiled shaders to the program
glAttachShader( ProgramId , VertexShaderId );
glAttachShader( ProgramId , FragmentShaderId );
//Links the shaders to the program
glLinkProgram( ProgramId );
//Error checking
printLog( ProgramId );
//Uses the program to render
glUseProgram( ProgramId );
//( check this program , for this variable and return it)
s_vertexLoc = glGetAttribLocation( ProgramId , "in_vertex" );
s_colorLoc = glGetAttribLocation( ProgramId , "in_color" );
s_factorLoc = glGetUniformLocation( ProgramId , "factor" );
}

You have several issues.
First, there is a version mismatch: you request an OpenGL 3.1 context (which corresponds to GLSL 1.40), but your shaders declare #version 150 (GLSL 1.50), which belongs to OpenGL 3.2. Moreover, "attribute" and "varying" were removed from GLSL 1.40 onwards, so your shaders most likely fail to compile. Request at least a 3.2 Core Profile context in your glut initialization, then replace "attribute" with "in" in your vertex shader, and replace "varying" with "out" in the vertex shader and "in" in the fragment shader, depending on whether the variable is an output or an input of that stage.
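For example, here is a minimal sketch of your two shaders rewritten for GLSL 1.50 (same logic; only the storage qualifiers change, and the deprecated gl_FragColor is replaced by a user-declared output):
const GLchar* VertexShader =
    "#version 150\n"
    "in vec3 in_vertex;"
    "in vec3 in_color;"
    "uniform float factor;"
    "out vec3 intp_color;"
    "void main(void)"
    "{"
    "    intp_color = in_color;"
    "    gl_Position = vec4( in_vertex , 1.0 );"
    "    gl_Position.y += factor;"
    "}";
const GLchar* FragmentShader =
    "#version 150\n"
    "uniform float factor;"
    "in vec3 intp_color;"
    "out vec4 out_color;"
    "void main(void){"
    "    out_color = vec4( intp_color , 1.0 ) * factor;"
    "}";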
Second, this comment:
//How to send data to the variable:
//( Where to send the data , how its grouped , data type , dont normalize the data,
// there is no offset, you find it here)
glVertexAttribPointer( s_vertexLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Vertices );
glVertexAttribPointer ( s_colorLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Colors);
is not true. In a core profile, that last value is a byte offset into the data store of the buffer object bound to GL_ARRAY_BUFFER, not a pointer to the data itself.
Third: you need a buffer object for your vertex coordinates (and another one for the colors). Use
glGenBuffers(), glBindBuffer(), and glBufferData(), as sketched below.
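A minimal sketch of the idea for the vertex positions (the color array works the same way; where exactly you create the buffer is up to you):
GLuint vertexBuffer;
glGenBuffers( 1, &vertexBuffer ); // create a buffer object
glBindBuffer( GL_ARRAY_BUFFER, vertexBuffer ); // bind it as the current vertex buffer
glBufferData( GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW ); // upload the data once
// With the buffer bound, the last argument really is a byte offset:
glVertexAttribPointer( s_vertexLoc , 3 , GL_FLOAT , GL_FALSE , 0 , (void*)0 );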
If I were you, I'd follow a tutorial on "modern OpenGL". Search the web; there are plenty.

code error : Thread 1: EXC_BAD_ACCESS (code=1, address=0x0)

I'm trying to write code in C++ using OpenGL (I use the GLFW and GLEW libraries). Here is the code:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#define numVAOs 1
GLuint renderingProgram;
GLuint vao[numVAOs];
GLuint createShaderProgram(){
const char*vshaderSource =
"#version 430 \n"
"void main(void) \n"
"{gl_Position = vec4(0.0,0.0,0.0,1.0)};";
const char*fshaderSource =
"#version 430 \n"
"out vec4 color; \n"
"void main(void) \n"
"{gl_Position = vec4(0.0,0.0,1.0,1.0)};";
GLuint vShader = glCreateShader(GL_VERTEX_SHADER);
GLuint fShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(vShader,1,&vshaderSource,NULL);
glShaderSource(fShader,1,&fshaderSource,NULL);
glCompileShader(vShader);
glCompileShader(fShader);
GLuint vfProgram = glCreateProgram();
glAttachShader(vfProgram,vShader);
glAttachShader(vfProgram,fShader);
glLinkProgram(vfProgram);
return vfProgram;
}
int main()
{
glfwInit();
// Define version and compatibility settings
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE,GLFW_OPENGL_CORE_PROFILE);
glfwWindowHint(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);
glfwWindowHint(GLFW_RESIZABLE, GL_FALSE);
glGenVertexArrays(numVAOs,vao);
glBindVertexArray(vao[0]);
glUseProgram(renderingProgram);
glDrawArrays(GL_POINT,0,1);
// Create OpenGL window and context
GLFWwindow* window = glfwCreateWindow(1430, 800, "Davide", NULL, NULL);
glfwMakeContextCurrent(window);
// Check for window creation failure
if (!window)
{
// Terminate GLFW
glfwTerminate();
return 0;
}
glewExperimental = GL_TRUE; glewInit();
// Event loop
while(!glfwWindowShouldClose(window))
{
// Clear the screen to black
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); glClear(GL_COLOR_BUFFER_BIT);
glfwSwapBuffers(window);
glfwPollEvents();
}
// Terminate GLFW
glfwTerminate(); return 0;
}
Unfortunately, when I run the code I get an error:
Thread 1: EXC_BAD_ACCESS (code = 1, address = 0x0).
A black screen with a dot in the center should appear instead.
Multiple issues:
You're calling GL functions before you have a current GL context:
glGenVertexArrays(numVAOs,vao); // nope
glBindVertexArray(vao[0]); // nope
glUseProgram(renderingProgram); // nope
glDrawArrays(GL_POINT,0,1); // nope
// Create OpenGL window and context
GLFWwindow* window = glfwCreateWindow(1430, 800, "Davide", NULL, NULL);
glfwMakeContextCurrent(window);
Move those calls to after glfwMakeContextCurrent() (and your GL loader init) so they have a GL context to operate on (and check if window is NULL before using it in glfwMakeContextCurrent()):
// Create OpenGL window and context
GLFWwindow* window = glfwCreateWindow(1430, 800, "Davide", NULL, NULL);
// Check for window creation failure
if (!window)
{
// Terminate GLFW
glfwTerminate();
return 0;
}
glfwMakeContextCurrent(window);
glewExperimental = GL_TRUE; glewInit();
glGenVertexArrays(numVAOs,vao);
glBindVertexArray(vao[0]);
glUseProgram(renderingProgram);
glDrawArrays(GL_POINT,0,1);
You should also initialize renderingProgram before glUseProgram()ing it, perhaps with createShaderProgram()?
gl_Position isn't valid in a fragment shader. You're thinking of the legacy gl_FragColor, or better, a user-defined output like your color variable.
If you're requesting a GL 3.2 context then #version 430 isn't valid either. Either upgrade to GL 4.3 or downgrade your shaders to #version 150.
GL_POINT isn't a valid input to glDrawArrays(). You're thinking of GL_POINTS.
Call glDrawArrays() each frame instead of once at the beginning of your draw-loop so you have a chance to actually see your point.
GLSL statements require a semicolon after them.
Invalid:
void main(void)
{
gl_Position = vec4(0.0,0.0,0.0,1.0)
};
Valid:
void main(void)
{
gl_Position = vec4(0.0,0.0,0.0,1.0); // note the semicolon
} // note the lack of semicolon
All together:
#include <GL/glew.h>
#include <GLFW/glfw3.h>
#include <iostream>
void CheckStatus( GLuint obj, bool isShader )
{
GLint status = GL_FALSE, log[ 1 << 11 ] = { 0 };
( isShader ? glGetShaderiv : glGetProgramiv )( obj, isShader ? GL_COMPILE_STATUS : GL_LINK_STATUS, &status );
if( status == GL_TRUE ) return;
( isShader ? glGetShaderInfoLog : glGetProgramInfoLog )( obj, sizeof( log ), NULL, (GLchar*)log );
std::cerr << (GLchar*)log << "\n";
std::exit( EXIT_FAILURE );
}
void AttachShader( GLuint program, GLenum type, const char* src )
{
GLuint shader = glCreateShader( type );
glShaderSource( shader, 1, &src, NULL );
glCompileShader( shader );
CheckStatus( shader, true );
glAttachShader( program, shader );
glDeleteShader( shader );
}
const char* const vert = 1 + R"GLSL(
#version 150
void main()
{
gl_Position = vec4(0.0,0.0,0.0,1.0);
}
)GLSL";
const char* const frag = 1 + R"GLSL(
#version 150
out vec4 color;
void main()
{
color = vec4(0.0,0.0,1.0,1.0);
}
)GLSL";
#define numVAOs 1
GLuint renderingProgram;
GLuint vao[ numVAOs ];
int main()
{
glfwInit();
// Define version and compatibility settings
glfwWindowHint( GLFW_CONTEXT_VERSION_MAJOR, 3 );
glfwWindowHint( GLFW_CONTEXT_VERSION_MINOR, 2 );
glfwWindowHint( GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE );
glfwWindowHint( GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE );
glfwWindowHint( GLFW_RESIZABLE, GL_FALSE );
// Create OpenGL window and context
GLFWwindow* window = glfwCreateWindow( 320, 240, "Davide", NULL, NULL );
// Check for window creation failure
if( !window )
{
// Terminate GLFW
glfwTerminate();
return 0;
}
glfwMakeContextCurrent( window );
glewExperimental = GL_TRUE;
glewInit();
glGenVertexArrays( numVAOs, vao );
glBindVertexArray( vao[ 0 ] );
GLuint renderingProgram = glCreateProgram();
AttachShader( renderingProgram, GL_VERTEX_SHADER, vert );
AttachShader( renderingProgram, GL_FRAGMENT_SHADER, frag );
glLinkProgram( renderingProgram );
CheckStatus( renderingProgram, false );
glUseProgram( renderingProgram );
// Event loop
while( !glfwWindowShouldClose( window ) )
{
// Clear the screen to black
glClearColor( 0.0f, 0.0f, 0.0f, 1.0f );
glClear( GL_COLOR_BUFFER_BIT );
glDrawArrays( GL_POINTS, 0, 1 );
glfwSwapBuffers( window );
glfwPollEvents();
}
// Terminate GLFW
glfwTerminate();
return 0;
}

Can't render to GtkGLArea

I'm trying to render a triangle to a GtkGLArea, but I only see the color I cleared the frame to with glClearColor().
Please note:
I know that the triangle is so big that it would fill the whole screen, but I also tried smaller ones and it didn't work either.
I also know that I should normally not recreate the program on every render; I only did that here to keep the example short.
I'm fairly certain that the error is neither in LoadShaders nor in the shaders themselves, because I've tried the exact same functions with GLFW and they worked fine there.
Things which might cause the problem:
I'm not flushing the frame currently or swapping framebuffers because the documentation (https://developer.gnome.org/gtk3/stable/GtkGLArea.html) doesn't mention that I have to. I've tried glFlush() but it didn't help either.
I assume that the screen coordinates go from -1 to 1 on all axes, as in normal OpenGL. Maybe that's wrong, but I couldn't find anything about it in the documentation either.
Could somebody help me?
This is how I compile it:
g++ -O3 -s -o main main.cpp -isystem include -Llibs -DNDEBUG `pkg-config --cflags gtk+-3.0` `pkg-config --libs gtk+-3.0` -lepoxy -lm
This is my code:
#include <gtk/gtk.h>
#include <epoxy/gl.h>
#include <epoxy/glx.h>
#include <iostream>
#include <vector>
GLuint LoadShaders(char const* vertex, char const* fragment){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
glShaderSource(VertexShaderID, 1, &vertex , NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> VertexShaderErrorMessage(InfoLogLength+1);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
// Compile Fragment Shader
glShaderSource(FragmentShaderID, 1, &fragment , NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> FragmentShaderErrorMessage(InfoLogLength+1);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
// Link the program
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> ProgramErrorMessage(InfoLogLength+1);
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
printf("%s\n", &ProgramErrorMessage[0]);
}
glDetachShader(ProgramID, VertexShaderID);
glDetachShader(ProgramID, FragmentShaderID);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
char const* vertShader = R"GLSL(
#version 330 core
void main(){
gl_Position.z = 0.0;
gl_Position.w = 1.0;
if (0 == gl_VertexID) {
gl_Position.x = -100.0;
gl_Position.y = -100.0;
}
if (2 == gl_VertexID) {
gl_Position.x = 0.0;
gl_Position.y = 100.0;
}
if (1 == gl_VertexID) {
gl_Position.x = 100.0;
gl_Position.y = -100.0;
}
}
)GLSL";
char const* fragShader = R"GLSL(
#version 330 core
layout(location = 0) out vec4 color;
void main(){
color = vec4(1.0, 0.0, 0.0, 1.0);
}
)GLSL";
gboolean
render(GtkGLArea*, GdkGLContext*, gpointer) {
glClearColor(0.5, 0.5, 0.5, 0);
glClear(GL_COLOR_BUFFER_BIT);
GLuint programID;
programID = LoadShaders(vertShader, fragShader);
glUseProgram(programID);
glDrawArrays(GL_TRIANGLES, 0, 3);
//glFlush();
glDeleteProgram(programID);
return TRUE;
}
int
main(int argc, char** argv) {
gtk_init(&argc, &argv);
auto window{gtk_window_new(GTK_WINDOW_TOPLEVEL)};
auto glWidget{gtk_gl_area_new()};
gtk_container_add(GTK_CONTAINER(window), glWidget);
g_signal_connect (glWidget, "render", G_CALLBACK(render), nullptr);
gtk_widget_show_all(window);
gtk_main();
return EXIT_SUCCESS;
}
Two things I can think of:
You aren't requesting a Core context from the OS. It looks like you have to override the create-context signal, then create and return a GdkGLContext on which you've called gdk_gl_context_set_required_version() (see the sketch below).
When you do get a Core context up and going, I'm pretty sure you still need a VAO bound, even if you're generating the geometry entirely within your vertex shader.
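A rough sketch of the first point, assuming GTK3's "create-context" signal and the GDK 3.16+ context API (untested; adapt the error handling as needed):
static GdkGLContext*
create_context(GtkGLArea* area, gpointer) {
    GError* error = nullptr;
    // Create a context for the widget's window and ask for at least GL 3.3
    GdkGLContext* context = gdk_window_create_gl_context(gtk_widget_get_window(GTK_WIDGET(area)), &error);
    if (error) {
        gtk_gl_area_set_error(area, error);
        g_clear_error(&error);
        return nullptr;
    }
    gdk_gl_context_set_required_version(context, 3, 3);
    return context;
}
// ...and next to the existing "render" connection:
// g_signal_connect(glWidget, "create-context", G_CALLBACK(create_context), nullptr);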
RE: missing VAOs:
With this GLFW program and the VAO creation/bind commented out:
#include <glad/glad.h>
#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>
#include <iostream>
void CheckStatus( GLuint obj, bool isShader )
{
GLint status = GL_FALSE, log[ 1 << 11 ] = { 0 };
( isShader ? glGetShaderiv : glGetProgramiv )( obj, isShader ? GL_COMPILE_STATUS : GL_LINK_STATUS, &status );
( isShader ? glGetShaderInfoLog : glGetProgramInfoLog )( obj, sizeof( log ), NULL, (GLchar*)log );
if( status == GL_TRUE ) return;
std::cerr << (GLchar*)log << "\n";
std::exit( EXIT_FAILURE );
}
void AttachShader( GLuint program, GLenum type, const char* src )
{
GLuint shader = glCreateShader( type );
glShaderSource( shader, 1, &src, NULL );
glCompileShader( shader );
CheckStatus( shader, true );
glAttachShader( program, shader );
glDeleteShader( shader );
}
const char* vert = 1 + R"GLSL(
#version 330 core
void main(){
gl_Position.z = 0.0;
gl_Position.w = 1.0;
if (0 == gl_VertexID) {
gl_Position.x = -100.0;
gl_Position.y = -100.0;
}
if (2 == gl_VertexID) {
gl_Position.x = 0.0;
gl_Position.y = 100.0;
}
if (1 == gl_VertexID) {
gl_Position.x = 100.0;
gl_Position.y = -100.0;
}
}
)GLSL";
const char* frag = 1 + R"GLSL(
#version 330 core
layout(location = 0) out vec4 color;
void main(){
color = vec4(1.0, 0.0, 0.0, 1.0);
}
)GLSL";
int main( int, char** )
{
glfwSetErrorCallback( []( int, const char* desc ) { std::cerr << desc << "\n"; std::exit( EXIT_FAILURE ); } );
glfwInit();
glfwWindowHint( GLFW_CONTEXT_VERSION_MAJOR, 3 );
glfwWindowHint( GLFW_CONTEXT_VERSION_MINOR, 3 );
glfwWindowHint( GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE );
glfwWindowHint( GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE );
GLFWwindow* window = glfwCreateWindow( 640, 480, "GLFW", NULL, NULL );
glfwMakeContextCurrent( window );
gladLoadGLLoader( (GLADloadproc)glfwGetProcAddress );
//GLuint vao = 0;
//glGenVertexArrays( 1, &vao );
//glBindVertexArray( vao );
GLuint prog = glCreateProgram();
AttachShader( prog, GL_VERTEX_SHADER, vert );
AttachShader( prog, GL_FRAGMENT_SHADER, frag );
glLinkProgram( prog );
CheckStatus( prog, false );
while( !glfwWindowShouldClose( window ) )
{
glfwPollEvents();
int w, h;
glfwGetFramebufferSize( window, &w, &h );
glViewport( 0, 0, w, h );
glClearColor( 0.5, 0.5, 0.5, 0 );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glUseProgram( prog );
glDrawArrays( GL_TRIANGLES, 0, 3 );
glfwSwapBuffers( window );
}
glfwTerminate();
}
Running on Linux with Mesa 13.0.6's llvmpipe backend & the MESA_DEBUG=1 envvar gives me a grey window and this message on stdout:
Mesa: User error: GL_INVALID_OPERATION in glDrawArrays(no VAO bound)
Restoring the VAO gives the expected red window.

OpenGL shaders don't compile

The shaders in my OpenGL project don't compile. I'm on Ubuntu 16.04 LTS, using CLion. I didn't find any solution, which is why I'm asking here.
Here is my error list:
ATTENTION: default value of option force_s3tc_enable overridden by environment.
ERROR::SHADER::VERTEX::COMPILATION_FAILED
0:1(1): error: syntax error, unexpected $end
ERROR::SHADER::FRAGMENT::COMPILATION_FAILED
0:1(1): error: syntax error, unexpected $end
ERROR::SHADER::PROGRAM::LINKING_FAILED
error: linking with uncompiled shadererror: linking with uncompiled shader
Here's my main.cpp code:
#include <iostream>
// GLEW
#define GLEW_STATIC
#include <GL/glew.h>
// GLFW
#include <GLFW/glfw3.h>
// Other includes
#include "Shader.h"
// Window dimensions
const GLuint WIDTH = 800, HEIGHT = 600;
// The MAIN function, from here we start the application and run the game loop
int main( )
{
// Init GLFW
glfwInit( );
// Set all the required options for GLFW
glfwWindowHint( GLFW_CONTEXT_VERSION_MAJOR, 3 );
glfwWindowHint( GLFW_CONTEXT_VERSION_MINOR, 3 );
glfwWindowHint( GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE );
glfwWindowHint( GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE );
glfwWindowHint( GLFW_RESIZABLE, GL_FALSE );
// Create a GLFWwindow object that we can use for GLFW's functions
GLFWwindow *window = glfwCreateWindow( WIDTH, HEIGHT, "LearnOpenGL", nullptr, nullptr );
int screenWidth, screenHeight;
glfwGetFramebufferSize( window, &screenWidth, &screenHeight );
if ( nullptr == window )
{
std::cout << "Failed to create GLFW window" << std::endl;
glfwTerminate( );
return EXIT_FAILURE;
}
glfwMakeContextCurrent( window );
// Set this to true so GLEW knows to use a modern approach to retrieving function pointers and extensions
glewExperimental = GL_TRUE;
// Initialize GLEW to setup the OpenGL Function pointers
if ( GLEW_OK != glewInit( ) )
{
std::cout << "Failed to initialize GLEW" << std::endl;
return EXIT_FAILURE;
}
// Define the viewport dimensions
glViewport( 0, 0, screenWidth, screenHeight );
// Build and compile our shader program
Shader ourShader( "core.vs", "core.frag" );
// Set up vertex data (and buffer(s)) and attribute pointers
GLfloat vertices[] =
{
// Positions // Colors
0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, // Bottom Right
-0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, // Bottom Left
0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 1.0f // Top
};
GLuint VBO, VAO;
glGenVertexArrays( 1, &VAO );
glGenBuffers( 1, &VBO );
// Bind the Vertex Array Object first, then bind and set vertex buffer(s) and attribute pointer(s).
glBindVertexArray( VAO );
glBindBuffer( GL_ARRAY_BUFFER, VBO );
glBufferData( GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW );
// Position attribute
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof( GLfloat ), ( GLvoid * ) 0 );
glEnableVertexAttribArray( 0 );
// Color attribute
glVertexAttribPointer( 1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof( GLfloat ), ( GLvoid * )( 3 * sizeof( GLfloat ) ) );
glEnableVertexAttribArray( 1 );
glBindVertexArray( 0 ); // Unbind VAO
// Game loop
while ( !glfwWindowShouldClose( window ) )
{
// Check if any events have been activiated (key pressed, mouse moved etc.) and call corresponding response functions
glfwPollEvents( );
// Render
// Clear the colorbuffer
glClearColor( 0.2f, 0.3f, 0.3f, 1.0f );
glClear( GL_COLOR_BUFFER_BIT );
// Draw the triangle
ourShader.Use( );
glBindVertexArray( VAO );
glDrawArrays( GL_TRIANGLES, 0, 3 );
glBindVertexArray(0);
// Swap the screen buffers
glfwSwapBuffers( window );
}
// Properly de-allocate all resources once they've outlived their purpose
glDeleteVertexArrays( 1, &VAO );
glDeleteBuffers( 1, &VBO );
// Terminate GLFW, clearing any resources allocated by GLFW.
glfwTerminate( );
return EXIT_SUCCESS;
}
Shader.h code:
#ifndef SHADER_H
#define SHADER_H
#include <string>
#include <fstream>
#include <sstream>
#include <iostream>
#include <GL/glew.h>
#include <cstring>
class Shader
{
public:
GLuint Program;
// Constructor generates the shader on the fly
Shader( const GLchar *vertexPath, const GLchar *fragmentPath )
{
// 1. Retrieve the vertex/fragment source code from filePath
std::string vertexCode;
std::string fragmentCode;
std::ifstream vShaderFile;
std::ifstream fShaderFile;
// ensures ifstream objects can throw exceptions:
vShaderFile.exceptions ( std::ifstream::badbit );
fShaderFile.exceptions ( std::ifstream::badbit );
try
{
// Open files
vShaderFile.open( vertexPath );
fShaderFile.open( fragmentPath );
std::stringstream vShaderStream, fShaderStream;
// Read file's buffer contents into streams
vShaderStream << vShaderFile.rdbuf( );
fShaderStream << fShaderFile.rdbuf( );
// close file handlers
vShaderFile.close( );
fShaderFile.close( );
// Convert stream into string
vertexCode = vShaderStream.str( );
fragmentCode = fShaderStream.str( );
}
catch ( std::ifstream::failure e )
{
std::cout << "ERROR::SHADER::FILE_NOT_SUCCESFULLY_READ" << std::endl;
}
const GLchar *vShaderCode = vertexCode.c_str( );
const GLchar *fShaderCode = fragmentCode.c_str( );
// 2. Compile shaders
GLuint vertex, fragment;
GLint success;
GLchar infoLog[512];
// Vertex Shader
vertex = glCreateShader( GL_VERTEX_SHADER );
glShaderSource( vertex, 1, &vShaderCode, NULL);
glCompileShader( vertex );
// Print compile errors if any
glGetShaderiv( vertex, GL_COMPILE_STATUS, &success );
if ( !success )
{
glGetShaderInfoLog( vertex, 512, NULL, infoLog );
std::cout << "ERROR::SHADER::VERTEX::COMPILATION_FAILED\n" << infoLog << std::endl;
}
// Fragment Shader
fragment = glCreateShader( GL_FRAGMENT_SHADER );
glShaderSource( fragment, 1, &fShaderCode, NULL);
glCompileShader( fragment );
// Print compile errors if any
glGetShaderiv( fragment, GL_COMPILE_STATUS, &success );
if ( !success )
{
glGetShaderInfoLog( fragment, 512, NULL, infoLog );
std::cout << "ERROR::SHADER::FRAGMENT::COMPILATION_FAILED\n" << infoLog << std::endl;
}
// Shader Program
this->Program = glCreateProgram( );
glAttachShader( this->Program, vertex );
glAttachShader( this->Program, fragment );
glLinkProgram( this->Program );
// Print linking errors if any
glGetProgramiv( this->Program, GL_LINK_STATUS, &success );
if (!success)
{
glGetProgramInfoLog( this->Program, 512, NULL, infoLog );
std::cout << "ERROR::SHADER::PROGRAM::LINKING_FAILED\n" << infoLog << std::endl;
}
// Delete the shaders as they're linked into our program now and no longer necessery
glDeleteShader( vertex );
glDeleteShader( fragment );
}
// Uses the current shader
void Use( )
{
glUseProgram( this->Program );
}
};
#endif
Here's my core.vs:
#version 330 core
layout (location = 0) in vec3 position;
layout (location = 1) in vec3 color;
layout (location = 2) in vec2 texCoord;
out vec3 ourColor;
out vec2 TexCoord;
void main()
{
gl_Position = vec4(position, 1.0f);
ourColor = color;
// We flip the y-axis by subtracting the coordinate from 1, because most images have their y-axis inverted relative to OpenGL's.
// TexCoord = texCoord;
TexCoord = vec2(texCoord.x, 1.0 - texCoord.y);
}
...and core.frag:
#version 330 core
in vec3 ourColor;
in vec2 TexCoord;
out vec4 color;
// Texture samplers
uniform sampler2D ourTexture1;
void main()
{
// Linearly interpolate between both textures (second texture is only slightly combined)
color = texture(ourTexture1, TexCoord);
}
I've also attached my CMakeLists.txt. Hope it helps:
cmake_minimum_required(VERSION 3.9)
project(STUDY_GL)
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -pthread -fpermissive")
find_package (OpenGL REQUIRED)
find_package (GLUT REQUIRED)
find_package (glfw3 REQUIRED)
find_library (glew REQUIRED)
find_library (glad REQUIRED)
include_directories(${/usr/include/GLFW/})
include_directories(${/usr/include/GL/})
file(GLOB SOURCE_FILES
*.cpp
*.h
)
add_executable(main.cpp ${SOURCE_FILES} Shader.h)
target_link_libraries (main.cpp ${OPENGL_LIBRARIES} ${GLUT_LIBRARIES} ${GLFW3_LIBRARIES} -lGL -lglfw -lglut -lGLEW)
I have an SOIL2 directory inside a project. Here's its link.
In general your code is fine, but you have to pass std::ifstream::failbit to std::ios::exceptions as well, because if std::ifstream::open fails, it is the failbit state flag that is set.
vShaderFile.exceptions ( std::ifstream::failbit | std::ifstream::badbit );
fShaderFile.exceptions ( std::ifstream::failbit | std::ifstream::badbit );
try
{
vShaderFile.open( vertexPath );
fShaderFile.open( fragmentPath );
.....
}
catch ( std::ifstream::failure e )
{
std::cout << "ERROR::SHADER::FILE_NOT_SUCCESFULLY_READ" << std::endl;
}
I bet that your working directory is not properly set. Use an absolute file path to the shader files for debugging purposes.
If the shader source files cannot be accessed and your code does not throw any exception, then you end up compiling empty strings. That causes exactly these error messages.
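As a quick sanity check (a hypothetical addition to your Shader constructor, not something your code has), you can bail out before compiling anything if nothing was read:
if ( vertexCode.empty( ) || fragmentCode.empty( ) )
{
    std::cout << "ERROR::SHADER::EMPTY_SOURCE (check the working directory)" << std::endl;
    return;
}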
By the way, since you do not use any texture in the code snippet posted in your question, you should do the following changes to your fragment shader:
// color = texture(ourTexture1, TexCoord); <--- skip
color = vec4(ourColor,1.0); <--- add
The error you see normally happens when shader source without a null terminator at the end is passed to glShaderSource without specifying the source string length. Since typical files don't contain a null terminator, OpenGL reads past the end of the source string.
The solution is to pass in the exact length of the shader source string.
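A minimal sketch of that fix, applied to the vertex shader in your Shader constructor (the fragment shader is handled the same way):
const GLchar *vShaderCode = vertexCode.c_str( );
GLint vLength = static_cast<GLint>( vertexCode.size( ) );
// With an explicit length, GL never depends on a terminating null byte
glShaderSource( vertex, 1, &vShaderCode, &vLength );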

OpenGL Red Book 8th - First Example gives black box (Ubuntu)

After getting the first example program from the 8th edition OpenGL Programming Guide to compile, after many alterations suggested by many sites, I am the proud owner of a black box - a vast improvement over the hours before. It's embarrassing to admit, but I've watched most of this in a debugger, and unless something basic is wrong with the program, I have no clue where to look. Other OpenGL examples have run fine; I'm trying to get the first example in the new book to run.
My box:
john#zerofluid:~/Downloads$ glxinfo | grep OpenGL
OpenGL vendor string: NVIDIA Corporation
OpenGL renderer string: GeForce GT 610/PCIe/SSE2
OpenGL version string: 4.3.0 NVIDIA 313.30
OpenGL shading language version string: 4.30 NVIDIA via Cg compiler
OpenGL extensions:
Linux zerofluid 3.8.0-26-generic #38-Ubuntu SMP Mon Jun 17 21:43:33 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux
(latest 13.04 Ubuntu)
I have a problem - it's way too much code to post here, but it is the first example of the book - as small as it gets - and I have no idea where the problem really is. It would be cool if someone actually wanted to help - I'd feed it back to the author of the book. Yes, the LoadShader function was found elsewhere and might be the problem, but it was supposed to be a solution. It's kind of hard to have any faith in the book when I can't get the first example to work.
It can be found here:
https://github.com/kestess/opengl8thfirstexample.git
it's way too much code to post here
Not really.
Try this:
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <vector>
#include <iostream>
struct Program
{
static GLuint Load( const char* vert, const char* geom, const char* frag )
{
GLuint prog = glCreateProgram();
if( vert ) AttachShader( prog, GL_VERTEX_SHADER, vert );
if( geom ) AttachShader( prog, GL_GEOMETRY_SHADER, geom );
if( frag ) AttachShader( prog, GL_FRAGMENT_SHADER, frag );
glLinkProgram( prog );
CheckStatus( prog );
return prog;
}
private:
static void CheckStatus( GLuint obj )
{
GLint status = GL_FALSE, len = 10;
if( glIsShader(obj) ) glGetShaderiv( obj, GL_COMPILE_STATUS, &status );
if( glIsProgram(obj) ) glGetProgramiv( obj, GL_LINK_STATUS, &status );
if( status == GL_TRUE ) return;
if( glIsShader(obj) ) glGetShaderiv( obj, GL_INFO_LOG_LENGTH, &len );
if( glIsProgram(obj) ) glGetProgramiv( obj, GL_INFO_LOG_LENGTH, &len );
std::vector< char > log( len, 'X' );
if( glIsShader(obj) ) glGetShaderInfoLog( obj, len, NULL, &log[0] );
if( glIsProgram(obj) ) glGetProgramInfoLog( obj, len, NULL, &log[0] );
std::cerr << &log[0] << std::endl;
exit( -1 );
}
static void AttachShader( GLuint program, GLenum type, const char* src )
{
GLuint shader = glCreateShader( type );
glShaderSource( shader, 1, &src, NULL );
glCompileShader( shader );
CheckStatus( shader );
glAttachShader( program, shader );
glDeleteShader( shader );
}
};
#define GLSL(version, shader) "#version " #version "\n" #shader
const char* vert = GLSL
(
400 core,
layout( location = 0 ) in vec4 vPosition;
void main()
{
gl_Position = vPosition;
}
);
const char* frag = GLSL
(
400 core,
out vec4 fColor;
void main()
{
fColor = vec4( 0.0, 0.0, 1.0, 1.0 );
}
);
enum VAO_IDs { Triangles, NumVAOs };
enum Buffer_IDs { ArrayBuffer, NumBuffers };
enum Attrib_IDs { vPosition = 0 };
GLuint VAOs[NumVAOs];
GLuint Buffers[NumBuffers];
const GLuint NumVertices = 6;
void init(void)
{
glGenVertexArrays(NumVAOs, VAOs);
glBindVertexArray(VAOs[Triangles]);
GLfloat vertices[NumVertices][2] = {
{ -0.90, -0.90 }, // Triangle 1
{ 0.85, -0.90 },
{ -0.90, 0.85 },
{ 0.90, -0.85 }, // Triangle 2
{ 0.90, 0.90 },
{ -0.85, 0.90 }
};
glGenBuffers(NumBuffers, Buffers);
glBindBuffer(GL_ARRAY_BUFFER, Buffers[ArrayBuffer]);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
GLuint program = Program::Load( vert, NULL, frag );
glUseProgram(program);
glVertexAttribPointer(vPosition, 2, GL_FLOAT, GL_FALSE, 0, (void*)(0) );
glEnableVertexAttribArray(vPosition);
}
void display(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glBindVertexArray(VAOs[Triangles]);
glDrawArrays(GL_TRIANGLES, 0, NumVertices);
glutSwapBuffers();
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE );
glutInitWindowSize(512, 512);
glutInitContextVersion(4, 0);
glutInitContextProfile(GLUT_CORE_PROFILE);
glutCreateWindow(argv[0]);
glewExperimental = GL_TRUE;
if( GLEW_OK != glewInit() )
exit(EXIT_FAILURE);
init();
glutDisplayFunc(display);
glutMainLoop();
}
No reason to request a 4.3 context if you're using #version 400 core.

Bind SDL2 texture to GLSL shader

I'm trying to bind an SDL2 texture to a GLSL shader, though I'm not entirely sure how. I'm using a library called glfx to handle the GLSL shaders, and I've been helping with the development of this library as well. I'm pretty sure I've got everything else right, but it crashes when I call SDL_GL_BindTexture. Can anyone see what I've done wrong?
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <string>
#include <GL/glew.h>
#include <GL/glfx.h>
#include <SDL2/SDL.h>
#include <FreeImage.h>
int main()
{
SDL_Window *mainwindow;
SDL_Renderer *renderer;
SDL_GLContext maincontext;
SDL_Init( SDL_INIT_VIDEO );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 3 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 2 );
SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );
SDL_GL_SetAttribute( SDL_GL_DEPTH_SIZE, 24 );
SDL_CreateWindowAndRenderer( 512, 512, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN, &mainwindow, &renderer );
maincontext = SDL_GL_CreateContext( mainwindow );
glewExperimental = GL_TRUE;
glewInit( );
fprintf( stdout, "%s\n", glGetString(GL_VERSION) );
fprintf( stdout, "%s\n", glGetString(GL_SHADING_LANGUAGE_VERSION) );
FIBITMAP* dib = FreeImage_Load( FIF_PNG, "test.png" );
uint32_t w = FreeImage_GetWidth( dib );
uint32_t h = FreeImage_GetHeight( dib );
dib = FreeImage_ConvertTo32Bits( dib );
BYTE* pixeles = FreeImage_GetBits( dib );
GLubyte* textura = new GLubyte[4*w*h];
SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STATIC, w, h );
const SDL_Rect rect = { 0, 0, w, h };
int pitch = 32;
SDL_LockTexture( texture, &rect, (void**)&textura, &pitch );
for( uint32_t j = 0; j < w * h; j++ )
{
textura[j*4+0] = pixeles[j*4+2];
textura[j*4+1] = pixeles[j*4+1];
textura[j*4+2] = pixeles[j*4+0];
textura[j*4+3] = pixeles[j*4+3];
}
SDL_UnlockTexture( texture );
FreeImage_Unload( dib );
delete [] textura;
int effect = glfxGenEffect( );
std::string shader;
shader ="struct VSinput\n"
"{\n"
" vec3 Position;\n"
"};\n"
"shader VSmain(in VSinput VSin, out vec2 TexCoord)\n"
"{\n"
" gl_Position = vec4(VSin.Position, 1.0);\n"
" TexCoord = vec2( 0.8, 0.8 );\n"
"};\n"
"uniform sampler2D gColorMap;\n"
"shader FSmain(in vec2 TexCoord, out vec4 FragColor)\n"
"{\n"
" FragColor = texture(gColorMap, TexCoord);\n"
"}\n"
"program SimpleTechnique\n"
"{\n"
" vs(150) = VSmain();\n"
" fs(150) = FSmain();\n"
"};\0";
glfxParseEffectFromMemory( effect, shader.c_str() );
int shaderProg = glfxCompileProgram( effect, "SimpleTechnique" );
if (shaderProg < 0)
{
std::string log = glfxGetEffectLog(effect);
fprintf( stderr, "%s\n", log.c_str() );
}
glClearColor ( 0.0, 0.0, 1.0, 1.0 );
glClear ( GL_COLOR_BUFFER_BIT );
float* vert = new float[9];
vert[0] = 0.0; vert[1] = 0.5; vert[2] =-1.0;
vert[3] =-1.0; vert[4] =-0.5; vert[5] =-1.0;
vert[6] = 1.0; vert[7] =-0.5; vert[8] =-1.0;
unsigned int m_vaoID;
unsigned int m_vboID;
glGenVertexArrays( 1, &m_vaoID );
glBindVertexArray( m_vaoID );
glGenBuffers( 1, &m_vboID );
glBindBuffer( GL_ARRAY_BUFFER, m_vboID );
glBufferData( GL_ARRAY_BUFFER, 9 * sizeof(GLfloat), vert, GL_STATIC_DRAW );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 0, 0 );
glEnableVertexAttribArray( 0 );
glEnable( GL_TEXTURE_2D );
int loc = glGetUniformLocation( shaderProg, "gColorMap" );
glActiveTexture( GL_TEXTURE0 );
SDL_GL_BindTexture(texture, NULL, NULL );
glUniform1i( loc, 0 );
glUseProgram( shaderProg );
glDrawArrays( GL_TRIANGLES, 0, 3 );
glDisableVertexAttribArray( 0 );
glBindVertexArray( 0 );
delete[] vert;
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glDeleteBuffers( 1, &m_vboID );
glDeleteVertexArrays( 1, &m_vaoID );
SDL_GL_SwapWindow( mainwindow );
SDL_Delay( 2000 );
SDL_GL_DeleteContext( maincontext );
SDL_DestroyWindow( mainwindow );
SDL_Quit( );
return 0;
}
glUniform - Specify the value of a uniform variable for the current program object
glUseProgram() then glUniform1i(), not the other way around.
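In your code that means swapping the two calls:
glUseProgram( shaderProg ); // make the program current first
glUniform1i( loc, 0 ); // now this sets shaderProg's sampler to texture unit 0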
EDIT: This is looking like a bug in SDL2. You might try the demo program I attached to the report and see if you can repro on your system.
EDIT2: Looks like Sam has a fix in already.