Using map with enums and class members - C++

Ok, I have the following class:
class Shader {
public:
    ...
private:
    GLuint _vertexShader;
    GLuint _fragmentShader;
};
Is it possible to create a mapping between enums (GL_VERTEX_SHADER, GL_FRAGMENT_SHADER) and the variables I declared in the class?
The reason for this is that I want to generalize a method that I am creating for this class.
switch ( shaderType )
{
case GL_VERTEX_SHADER:
    _vertexShader = glCreateShader( shaderType );
    glShaderSource( _vertexShader, 1, &shaderCode, 0 );
    glCompileShader( _vertexShader );
    glGetShaderiv( _vertexShader, GL_COMPILE_STATUS, &isCompiled );
    break;
case GL_FRAGMENT_SHADER:
    _fragmentShader = glCreateShader( shaderType );
    glShaderSource( _fragmentShader, 1, &shaderCode, 0 );
    glCompileShader( _fragmentShader );
    glGetShaderiv( _fragmentShader, GL_COMPILE_STATUS, &isCompiled );
    break;
}
As you can see, the switch runs essentially the same code in each case; only the member variable changes. It would be a lot nicer if, instead of using a switch, I could map each GLenum to its respective member variable.

Direct answer:
You could make a mapping from an enum to a pointer to data member:
std::map<GLenum, GLuint Shader::*> mapper;
mapper[GL_VERTEX_SHADER ] = &Shader::_vertexShader;
mapper[GL_FRAGMENT_SHADER] = &Shader::_fragmentShader;
Or, if the enum IDs happen to have the values 0 and 1:
std::vector<GLuint Shader::*> mapper(2);
mapper[GL_VERTEX_SHADER ] = &Shader::_vertexShader;
mapper[GL_FRAGMENT_SHADER] = &Shader::_fragmentShader;
Then you just factor out the common code by parameterizing it on a GLuint&:
template <typename SC, typename Bool>
void CreateShader(GLuint& shader, GLenum shaderType, SC& shaderCode, Bool& isCompiled)
{
    shader = glCreateShader( shaderType );
    glShaderSource( shader, 1, &shaderCode, 0 );
    glCompileShader( shader );
    glGetShaderiv( shader, GL_COMPILE_STATUS, &isCompiled );
}
And then:
CreateShader(this->*mapper[shaderType], shaderType, shaderCode, isCompiled);
But for this example I would just use a switch and not define, initialize and use the (global?) mapper object:
switch ( shaderType )
{
case GL_VERTEX_SHADER:
    CreateShader(_vertexShader, shaderType, shaderCode, isCompiled);
    break;
case GL_FRAGMENT_SHADER:
    CreateShader(_fragmentShader, shaderType, shaderCode, isCompiled);
    break;
}
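Putting the pieces together, here is a minimal sketch of how the map-based variant could look inside the class (the Compile method name and the static mapper are illustrative additions, assuming <map> is included and the CreateShader template above is visible):
class Shader {
public:
    void Compile( GLenum shaderType, const GLchar* shaderCode )
    {
        // Illustrative mapping, built once, from shader stage to member.
        static const std::map<GLenum, GLuint Shader::*> mapper = {
            { GL_VERTEX_SHADER,   &Shader::_vertexShader   },
            { GL_FRAGMENT_SHADER, &Shader::_fragmentShader },
        };
        GLint isCompiled = GL_FALSE;
        // this->*mapper.at(shaderType) dereferences the pointer-to-member
        // and yields a GLuint& bound to the right member variable.
        CreateShader( this->*mapper.at( shaderType ), shaderType, shaderCode, isCompiled );
    }
private:
    GLuint _vertexShader;
    GLuint _fragmentShader;
};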

Related

Emscripten fails to compile shader

I'm using emscripten with C++17 and WebGL 1.0 to try to draw some basic things in OpenGL, but I can't seem to get past shader compilation. I tried many different shaders to rule out the shader itself being the issue.
My assumption is that it is the encoding of the shader text, but I can't figure it out. Maybe someone has already faced this and can provide a solution.
Compiled with:
-s ENVIRONMENT=web
-s TOTAL_MEMORY=2147483648
-s MODULARIZE=1
-s EXPORT_ES6=1
-s DEMANGLE_SUPPORT=1
-s USE_PTHREADS=0
-s GL_ASSERTIONS=1
-s GL_DEBUG=1
--preload-file ./assets#/
If you're trying to build a minimal program, the file names in assets are standard.vert and standard.frag
Shaders
Vertex shader:
precision mediump float;
attribute vec2 vertPosition;
attribute vec3 vertColor;
varying vec3 fragColor;
void main() {
    fragColor = vertColor;
    gl_Position = vec4(vertPosition, 0.0, 1.0);
}
Fragment Shader:
precision mediump float;
varying vec3 fragColor;
void main() {
    gl_FragColor = vec4(fragColor, 1.0);
}
Loader
readFile
std::string readFile( FILE* file )
{
    fseek( file, 0L, SEEK_END );
    long buffSize = ftell( file );
    rewind( file );
    std::string buffer( buffSize, '\0' );
    fread( (void*)buffer.c_str(), 1, buffSize, file );
    return buffer;
}
GLenum ErrorCheckValue = glGetError();
GLint gl_shader_result = GL_FALSE;
int InfoLogLength;
std::string vertex_src;
std::string frag_src;
FILE* fp = nullptr;
fp = fopen( "/standard.vert", "r" );
if ( !fp )
{
    emscripten_console_error( "No file found for vertex shader" );
    return;
}
vertex_src = readFile( fp );
emscripten_console_logf( "Vertex Shader:\n%s", vertex_src.c_str() );
fclose( fp );
fp = fopen( "/standard.frag", "r" );
if ( !fp )
{
    emscripten_console_error( "No file found for fragment shader" );
    return;
}
frag_src = readFile( fp );
emscripten_console_logf( "Fragment Shader:\n%s", frag_src.c_str() );
fclose( fp );
const char* vertexCode = vertex_src.c_str();
const char* fragCode = frag_src.c_str();
u32 vertexShaderId = glCreateShader( GL_VERTEX_SHADER );
glShaderSource( vertexShaderId, 1, (const GLchar* const*)vertexCode, NULL );
glCompileShader( vertexShaderId );
// check for vertex shader errors
glGetShaderiv( vertexShaderId, GL_COMPILE_STATUS, &gl_shader_result );
glGetShaderiv( vertexShaderId, GL_INFO_LOG_LENGTH, &InfoLogLength );
if ( InfoLogLength > 0 )
{
    std::string msg( InfoLogLength + 1, '\0' );
    glGetShaderInfoLog( vertexShaderId, InfoLogLength, NULL, (GLchar*)msg.c_str() );
    emscripten_console_error( ( "WASM:: Vertex shader error: " + msg ).c_str() );
    return;
}
u32 fragmentShaderId = glCreateShader( GL_FRAGMENT_SHADER );
glShaderSource( fragmentShaderId, 1, (const GLchar* const*)fragCode, NULL );
glCompileShader( fragmentShaderId );
// check for fragment shader errors
glGetShaderiv( fragmentShaderId, GL_COMPILE_STATUS, &gl_shader_result );
glGetShaderiv( fragmentShaderId, GL_INFO_LOG_LENGTH, &InfoLogLength );
if ( InfoLogLength > 0 )
{
    std::string msg( InfoLogLength + 1, '\0' );
    glGetShaderInfoLog( fragmentShaderId, InfoLogLength, NULL, (GLchar*)msg.c_str() );
    emscripten_console_error( ( "WASM:: Fragment shader error: " + msg ).c_str() );
    return;
}
u32 shaderProgramId = glCreateProgram();
glAttachShader( shaderProgramId, vertexShaderId );
glAttachShader( shaderProgramId, fragmentShaderId );
glLinkProgram( shaderProgramId );
// Check the program
glGetProgramiv( shaderProgramId, GL_LINK_STATUS, &gl_shader_result );
glGetProgramiv( shaderProgramId, GL_INFO_LOG_LENGTH, &InfoLogLength );
if ( InfoLogLength > 0 )
{
    std::string msg( InfoLogLength + 1, '\0' );
    glGetProgramInfoLog( shaderProgramId, InfoLogLength, NULL, (GLchar*)msg.c_str() );
    emscripten_console_error( ( "WASM:: Shader compilation error: " + msg ).c_str() );
    return;
}
emscripten_console_log( "WASM:: Compiled shaders" );
glDetachShader( shaderProgramId, vertexShaderId );
glDetachShader( shaderProgramId, fragmentShaderId );
glDeleteShader( vertexShaderId );
glDeleteShader( fragmentShaderId );
And of course the error: glCompileShader: ERROR: 1:1: '' : syntax error
Thank you for your patience in reading all this. Any comment, suggestion, or solution is better than none, so thank you.
I am not sure if this is due to WebGL 1.0/2.0 being OpenGL ES 2/ES 3, but in order for WebGL to correctly read the shader code, it needs the pointer to the char[] pointer containing the shader code.
The correction to be made for vertex and fragment shaders is:
glShaderSource( vertexShaderId, 1, (const GLchar**)&vertexCode, NULL );
glShaderSource( fragmentShaderId, 1, (const GLchar**)&fragCode, NULL );
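A pattern that makes this mistake harder to repeat is to take the address inside a small helper, so call sites never juggle the pointer-to-pointer themselves. A sketch only (the compileShader name is made up here; u32 is the question's alias):
u32 compileShader( GLenum type, const std::string& src )
{
    u32 id = glCreateShader( type );
    const GLchar* ptr = src.c_str();
    glShaderSource( id, 1, &ptr, NULL );  // &ptr is the pointer-to-pointer GL expects
    glCompileShader( id );
    return id;
}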

Can't render to GtkGLArea

I'm trying to render a triangle to a GtkGLArea, but I only see the color with which I cleared the frame via glClearColor().
Please note:
I know that the triangle is so big that it would fill the whole screen, but I also tried smaller ones and it didn't work either.
I also know that I should normally not recreate the program before each render; I only did it here to keep the example short.
I'm fairly certain that the error is neither in LoadShaders nor in the shaders themselves, because I've tried the exact same functions with GLFW and they worked fine there.
Things which might cause the problem:
I'm not currently flushing the frame or swapping framebuffers, because the documentation (https://developer.gnome.org/gtk3/stable/GtkGLArea.html) doesn't mention that I have to. I've tried glFlush(), but it didn't help either.
I assume that the screen coordinates go from -1 to 1 on all axes, like in normal OpenGL. Maybe that's wrong, but I couldn't find anything about it in the documentation either.
Could somebody help me?
This is how I compile it:
g++ -O3 -s -o main main.cpp -isystem include -Llibs -DNDEBUG `pkg-config --cflags gtk+-3.0` `pkg-config --libs gtk+-3.0` -lepoxy -lm
This is my code:
#include <gtk/gtk.h>
#include <epoxy/gl.h>
#include <epoxy/glx.h>
#include <iostream>
#include <vector>
GLuint LoadShaders(char const* vertex, char const* fragment){
    // Create the shaders
    GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
    GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
    GLint Result = GL_FALSE;
    int InfoLogLength;
    // Compile Vertex Shader
    glShaderSource(VertexShaderID, 1, &vertex, NULL);
    glCompileShader(VertexShaderID);
    // Check Vertex Shader
    glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
    glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
    if ( InfoLogLength > 0 ){
        std::vector<char> VertexShaderErrorMessage(InfoLogLength+1);
        glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
        printf("%s\n", &VertexShaderErrorMessage[0]);
    }
    // Compile Fragment Shader
    glShaderSource(FragmentShaderID, 1, &fragment, NULL);
    glCompileShader(FragmentShaderID);
    // Check Fragment Shader
    glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
    glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
    if ( InfoLogLength > 0 ){
        std::vector<char> FragmentShaderErrorMessage(InfoLogLength+1);
        glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
        printf("%s\n", &FragmentShaderErrorMessage[0]);
    }
    // Link the program
    GLuint ProgramID = glCreateProgram();
    glAttachShader(ProgramID, VertexShaderID);
    glAttachShader(ProgramID, FragmentShaderID);
    glLinkProgram(ProgramID);
    // Check the program
    glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
    glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
    if ( InfoLogLength > 0 ){
        std::vector<char> ProgramErrorMessage(InfoLogLength+1);
        glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
        printf("%s\n", &ProgramErrorMessage[0]);
    }
    glDetachShader(ProgramID, VertexShaderID);
    glDetachShader(ProgramID, FragmentShaderID);
    glDeleteShader(VertexShaderID);
    glDeleteShader(FragmentShaderID);
    return ProgramID;
}
char const* vertShader = R"GLSL(
#version 330 core
void main(){
    gl_Position.z = 0.0;
    gl_Position.w = 1.0;
    if (0 == gl_VertexID) {
        gl_Position.x = -100.0;
        gl_Position.y = -100.0;
    }
    if (2 == gl_VertexID) {
        gl_Position.x = 0.0;
        gl_Position.y = 100.0;
    }
    if (1 == gl_VertexID) {
        gl_Position.x = 100.0;
        gl_Position.y = -100.0;
    }
}
)GLSL";
char const* fragShader = R"GLSL(
#version 330 core
layout(location = 0) out vec4 color;
void main(){
    color = vec4(1.0, 0.0, 0.0, 1.0);
}
)GLSL";
gboolean
render(GtkGLArea*, GdkGLContext*, gpointer) {
    glClearColor(0.5, 0.5, 0.5, 0);
    glClear(GL_COLOR_BUFFER_BIT);
    GLuint programID;
    programID = LoadShaders(vertShader, fragShader);
    glUseProgram(programID);
    glDrawArrays(GL_TRIANGLES, 0, 3);
    //glFlush();
    glDeleteProgram(programID);
    return TRUE;
}
int
main(int argc, char** argv) {
    gtk_init(&argc, &argv);
    auto window{gtk_window_new(GTK_WINDOW_TOPLEVEL)};
    auto glWidget{gtk_gl_area_new()};
    gtk_container_add(GTK_CONTAINER(window), glWidget);
    g_signal_connect(glWidget, "render", G_CALLBACK(render), nullptr);
    gtk_widget_show_all(window);
    gtk_main();
    return EXIT_SUCCESS;
}
Two things I can think of:
You aren't requesting a Core context from the OS. Looks like you have to override create-context and create + return a gdk_gl_context_set_required_version'd GdkGLContext (a sketch follows below).
When you do get a Core context up and going, I'm pretty sure you still need a VAO bound, even if you're generating geometry entirely within your vertex shader.
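RE: the Core context, a create-context handler might look roughly like this (a sketch, untested, with error handling omitted; gdk_window_create_gl_context, gdk_gl_context_set_required_version and gdk_gl_context_realize are the relevant GDK 3 calls):
static GdkGLContext*
create_context(GtkGLArea* area, gpointer) {
    GError* error = nullptr;
    auto context{gdk_window_create_gl_context(gtk_widget_get_window(GTK_WIDGET(area)), &error)};
    // Request a 3.3 Core context before realizing it.
    gdk_gl_context_set_required_version(context, 3, 3);
    gdk_gl_context_realize(context, &error);
    return context;
}
// ...and next to the existing "render" connection:
// g_signal_connect(glWidget, "create-context", G_CALLBACK(create_context), nullptr);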
RE: missing VAOs:
With this GLFW program and the VAO creation/bind commented out:
#include <glad/glad.h>
#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>
#include <iostream>
void CheckStatus( GLuint obj, bool isShader )
{
    GLint status = GL_FALSE, log[ 1 << 11 ] = { 0 };
    ( isShader ? glGetShaderiv : glGetProgramiv )( obj, isShader ? GL_COMPILE_STATUS : GL_LINK_STATUS, &status );
    ( isShader ? glGetShaderInfoLog : glGetProgramInfoLog )( obj, sizeof( log ), NULL, (GLchar*)log );
    if( status == GL_TRUE ) return;
    std::cerr << (GLchar*)log << "\n";
    std::exit( EXIT_FAILURE );
}
void AttachShader( GLuint program, GLenum type, const char* src )
{
    GLuint shader = glCreateShader( type );
    glShaderSource( shader, 1, &src, NULL );
    glCompileShader( shader );
    CheckStatus( shader, true );
    glAttachShader( program, shader );
    glDeleteShader( shader );
}
const char* vert = 1 + R"GLSL(
#version 330 core
void main(){
    gl_Position.z = 0.0;
    gl_Position.w = 1.0;
    if (0 == gl_VertexID) {
        gl_Position.x = -100.0;
        gl_Position.y = -100.0;
    }
    if (2 == gl_VertexID) {
        gl_Position.x = 0.0;
        gl_Position.y = 100.0;
    }
    if (1 == gl_VertexID) {
        gl_Position.x = 100.0;
        gl_Position.y = -100.0;
    }
}
)GLSL";
const char* frag = 1 + R"GLSL(
#version 330 core
layout(location = 0) out vec4 color;
void main(){
    color = vec4(1.0, 0.0, 0.0, 1.0);
}
)GLSL";
int main( int, char** )
{
    glfwSetErrorCallback( []( int, const char* desc ) { std::cerr << desc << "\n"; std::exit( EXIT_FAILURE ); } );
    glfwInit();
    glfwWindowHint( GLFW_CONTEXT_VERSION_MAJOR, 3 );
    glfwWindowHint( GLFW_CONTEXT_VERSION_MINOR, 3 );
    glfwWindowHint( GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE );
    glfwWindowHint( GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE );
    GLFWwindow* window = glfwCreateWindow( 640, 480, "GLFW", NULL, NULL );
    glfwMakeContextCurrent( window );
    gladLoadGLLoader( (GLADloadproc)glfwGetProcAddress );
    //GLuint vao = 0;
    //glGenVertexArrays( 1, &vao );
    //glBindVertexArray( vao );
    GLuint prog = glCreateProgram();
    AttachShader( prog, GL_VERTEX_SHADER, vert );
    AttachShader( prog, GL_FRAGMENT_SHADER, frag );
    glLinkProgram( prog );
    CheckStatus( prog, false );
    while( !glfwWindowShouldClose( window ) )
    {
        glfwPollEvents();
        int w, h;
        glfwGetFramebufferSize( window, &w, &h );
        glViewport( 0, 0, w, h );
        glClearColor( 0.5, 0.5, 0.5, 0 );
        glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
        glUseProgram( prog );
        glDrawArrays( GL_TRIANGLES, 0, 3 );
        glfwSwapBuffers( window );
    }
    glfwTerminate();
}
Running on Linux with Mesa 13.0.6's llvmpipe backend & the MESA_DEBUG=1 envvar gives me a grey window and this message on stdout:
Mesa: User error: GL_INVALID_OPERATION in glDrawArrays(no VAO bound)
Restoring the VAO gives the expected red window.
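Applied back to the GtkGLArea code, the equivalent fix is to create and bind a VAO once the context is current, e.g. at the top of render() before drawing (a sketch; a realize handler would be the cleaner home for the one-time creation):
static GLuint vao = 0;
if (vao == 0) {
    // One-time creation: Core profiles refuse glDrawArrays with no VAO bound.
    glGenVertexArrays(1, &vao);
}
glBindVertexArray(vao);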

Opengl Shader Program changes state without reason

I'm writing an After Effects plugin for the Windows platform using OpenGL. After initializing and compiling my shader program I call
glGetProgramiv(inData.mProgramObjSu, GL_ATTACHED_SHADERS, &size); and I get back the value 3 (which is correct, because I attached a geometry shader).
I also call glGetAttribLocation with various attribute names and get back valid values. When my render function is called later, the shader program is completely useless: every call to glGetAttribLocation returns -1 with the same attribute strings I used earlier, and glGetProgramiv(inData.mProgramObjSu, GL_ATTACHED_SHADERS, &size); returns a size of 2. I've got a little experience in OpenGL programming, but I have never had a problem like this before.
Here is the shader initialisation code:
GLint vertCompiledB = 0;
GLint geoCompiledB = 0;
GLint fragCompiledB = 0;
GLint linkedB = 0;
// Create vertex shader
inData.mVertexShaderSu = glCreateShader( GL_VERTEX_SHADER );
glShaderSource( inData.mVertexShaderSu, 1, &inVertexShader, nullptr );
glCompileShader( inData.mVertexShaderSu );
glGetShaderiv( inData.mVertexShaderSu, GL_COMPILE_STATUS, &vertCompiledB );
char str[4096];
if( !vertCompiledB )
{
    glGetShaderInfoLog( inData.mVertexShaderSu, sizeof(str), NULL, str );
    GL_CHECK(AESDK_OpenGL_ShaderInit_Err);
}
// Create geometry shader
inData.mGeometryShaderSu = glCreateShader( GL_GEOMETRY_SHADER );
glShaderSource( inData.mGeometryShaderSu, 1, &inGeometryShader, nullptr );
glCompileShader( inData.mGeometryShaderSu );
glGetShaderiv( inData.mGeometryShaderSu, GL_COMPILE_STATUS, &geoCompiledB );
if( !geoCompiledB )
{
    glGetShaderInfoLog( inData.mGeometryShaderSu, sizeof(str), NULL, str );
    GL_CHECK(AESDK_OpenGL_ShaderInit_Err);
}
// Create fragment shader
inData.mFragmentShaderSu = glCreateShader( GL_FRAGMENT_SHADER );
glShaderSource( inData.mFragmentShaderSu, 1, &inFragmentShader, nullptr );
glCompileShader( inData.mFragmentShaderSu );
glGetShaderiv( inData.mFragmentShaderSu, GL_COMPILE_STATUS, &fragCompiledB );
if( !fragCompiledB )
{
    glGetShaderInfoLog( inData.mFragmentShaderSu, sizeof(str), NULL, str );
    GL_CHECK(AESDK_OpenGL_ShaderInit_Err);
}
// Create a program object and attach the three compiled shaders...
inData.mProgramObjSu = glCreateProgram();
glAttachShader( inData.mProgramObjSu, inData.mVertexShaderSu );
glAttachShader( inData.mProgramObjSu, inData.mGeometryShaderSu );
glAttachShader( inData.mProgramObjSu, inData.mFragmentShaderSu );
// Link the program object
glLinkProgram( inData.mProgramObjSu );
glGetProgramiv( inData.mProgramObjSu, GL_LINK_STATUS, &linkedB );
inData.mPosAttribLoc = glGetAttribLocation(inData.mProgramObjSu, "pos");
inData.mNormAttribLoc = glGetAttribLocation(inData.mProgramObjSu, "norm");
inData.mMVPUnifLoc = glGetUniformLocation(inData.mProgramObjSu, "mvp");
inData.mNormMatUnifLoc = glGetUniformLocation(inData.mProgramObjSu, "normMat");
inData.mLineWidthUnifLoc = glGetUniformLocation(inData.mProgramObjSu, "width");
inData.mEdgeWidthRatioUnifLoc = glGetUniformLocation(inData.mProgramObjSu, "edgeWidthRatio");
inData.mStrokeOverflowUnifLoc = glGetUniformLocation(inData.mProgramObjSu, "strokeOverflow");
int length;
int size;
GLenum type;
GLchar name[40];
glGetActiveUniform(inData.mProgramObjSu, 1, 40, &length, &size, &type, name);
glGetProgramiv(inData.mProgramObjSu, GL_ATTACHED_SHADERS, &size);
if( !linkedB )
{
    int length;
    glGetProgramInfoLog( inData.mProgramObjSu, 1000, &length, str );
    GL_CHECK(AESDK_OpenGL_ShaderInit_Err);
}

GLSL loading core dumped

I am compiling this project on a Linux machine. After fixing all the include and lib errors, I get a core dump, but if I comment out the initGLSL line, it runs well. The initGLSL function is given below.
void Viewer :: initGLSL( void )
{
    shader.loadVertex( "shaders/vertex.glsl" );
    shader.loadFragment( "shaders/fragment.glsl" );
}
vertex.glsl:
varying vec3 position;
varying vec3 normal;
void main()
{
    gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
    gl_FrontColor = gl_Color;
    position = gl_Vertex.xyz;
    normal = gl_Normal.xyz;
}
I don't know how to debug this program. How can I check if this error is because of the GLSL, or the loader function?
--------------- Thanks for your comment ---------------
The load function:
void Shader::loadVertex( const char* filename )
{
    load( GL_VERTEX_SHADER, filename, vertexShader );
}
void Shader::loadFragment( const char* filename )
{
    load( GL_FRAGMENT_SHADER, filename, fragmentShader );
}
void Shader::load( GLenum shaderType, const char* filename, GLuint& shader )
// read shader from GLSL source file, compile, and attach to program
{
    string source;
    if( !readSource( filename, source ))
    {
        return;
    }
    if( program == 0 )
    {
        program = glCreateProgram();
    }
    if( shader != 0 )
    {
        glDetachShader( program, shader );
    }
    shader = glCreateShader( shaderType );
    const char* source_c_str = source.c_str();
    glShaderSource( shader, 1, &(source_c_str), NULL );
    glCompileShader( shader );
    GLint compileStatus;
    glGetShaderiv( shader, GL_COMPILE_STATUS, &compileStatus );
    if( compileStatus == GL_TRUE )
    {
        glAttachShader( program, shader );
        linked = false;
    }
    else
    {
        GLsizei maxLength = 0;
        glGetShaderiv( shader, GL_INFO_LOG_LENGTH, &maxLength );
        if( maxLength > 0 )
        {
            GLchar* infoLog = new char[ maxLength ];
            GLsizei length;
            glGetShaderInfoLog( shader, maxLength, &length, infoLog );
            cerr << "GLSL Error: " << infoLog << endl;
            delete[] infoLog;
        }
    }
}
And when I tried debugging with gdb, I got this message:
(gdb) p (filename)
$1 = 0x482e41 "shaders/vertex.glsl"
(gdb) n
77 if( program == 0 )
(gdb) n
79 program = glCreateProgram();
(gdb) n
Program received signal SIGSEGV, Segmentation fault.
0x0000000000000000 in ?? ()
Have you definitely created an OpenGL context before calling shader.load*?
E.g. one of these, if you're using the corresponding library:
glutInit
glutCreateWindow
glfwInit
glfwCreateWindow
SDL_init
SDL_CreateWindow
SDL_GL_CreateContext
I don't have much experience with this, but some GL functions aren't linked at runtime by default - glCreateProgram may still be NULL after creating a context. I use GLEW to do this for me (glewInit()), but there are other ways.
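For example, with GLFW plus GLEW the order would be roughly this (a sketch under those assumptions; the gdb backtrace above, with the call landing at address 0x0, is exactly what an unresolved GL entry point looks like):
glfwInit();
GLFWwindow* window = glfwCreateWindow( 640, 480, "demo", NULL, NULL );
glfwMakeContextCurrent( window );  // a GL context must be current...
glewInit();                        // ...before GLEW can resolve glCreateProgram & co.
shader.loadVertex( "shaders/vertex.glsl" );    // only now is this safe
shader.loadFragment( "shaders/fragment.glsl" );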

Reading OpenGL Shader Language from .glsl File

I am trying to read shader strings from a file; however, I am facing a problem at the glShaderSource() function call. As you know, glShaderSource() takes a const char**, and I have to declare a char* for reading from the file. So I am using a cast to convert between the types.
If I use const_cast<const char **>, the shape appears; however, it has the wrong color (it should be orange, not white).
If I use reinterpret_cast<const char**>, I get an "Access violation reading location 0x73726576" error at run time.
So, how can I solve this problem? Thank you!
Platform: Windows 7, Visual Studio 2010
Code Lines:
File: shader.glsl
#version 330
in vec3 vp;
void main() {
    gl_Position = vec4( vp, 1.0);
}
main():
/* FILE READING */
FILE* shaderFile = fopen( "shader.glsl ", "r");
int fileSize = 0;
char* vertex_shader = NULL;
//Getting File Size
fseek( shaderFile, 0, SEEK_END );
fileSize = ftell( shaderFile );
rewind( shaderFile );
//Reading From File
vertex_shader = (char*)malloc( sizeof( char) * (fileSize+1) );
fread( vertex_shader, sizeof( char ), fileSize, shaderFile );
vertex_shader[ fileSize] = '\0';
fclose( shaderFile );
//Shader definition - If I used this format, it works.
/*const char* vertex_shader = "#version 330\n"
    "in vec3 vp;"
    "void main(){"
    "gl_Position = vec4( vp, 1.0);"
    "}";*/
//If I use const char* vertex_shader above, it appears orange.
const char* fragment_shader = "#version 330\n"
    "out vec4 frag_colour;"
    "void main () {"
    "  frag_colour = vec4(0.7, 0.4, 0.2, 1.0);"
    "}";
//Shader compiling
unsigned int vertexShader = glCreateShader( GL_VERTEX_SHADER );
//The shape appears but not orange
glShaderSource( vertexShader, 1, const_cast<const char **>(&vertex_shader) , NULL );
//glShaderSource( vertexShader, 1, reinterpret_cast<const char**>(vertex_shader) , NULL ); //Gives error
glCompileShader( vertexShader );
unsigned int fragmentShader = glCreateShader( GL_FRAGMENT_SHADER );
glShaderSource( fragmentShader, 1, &fragment_shader, NULL);
glCompileShader( fragmentShader );
//Shader program
unsigned int shaderProgram = glCreateProgram();
glAttachShader( shaderProgram, fragmentShader );
glAttachShader( shaderProgram, vertexShader );
glLinkProgram( shaderProgram );
//Drawing
while( !glfwWindowShouldClose( window ) )
{
    glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
    glUseProgram( shaderProgram );
    glBindVertexArray( vao );
    glDrawArrays( GL_TRIANGLES, 0, 3);
    glfwPollEvents();
    glfwSwapBuffers( window);
}
Pass the address of the pointer to the function:
glShaderSource( vertexShader, 1, (const GLchar**)&vertex_shader, NULL);
EDIT:
Thanks for updating the code in the question. If your program compiles and doesn't crash but the shaders still don't work, it's time to investigate whether the GLSL compiler returned any errors. After each call to glCompileShader(), write something like the following to display any problems that occurred during compilation:
// This checks for errors upon compiling the Vertex Shader
GLint _compiled = 0;
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &_compiled);
if (!_compiled)
{
    GLint length;
    GLchar* log;
    glGetShaderiv(vertexShader, GL_INFO_LOG_LENGTH, &length);
    log = new GLchar[length];
    glGetShaderInfoLog(vertexShader, length, &length, log);
    std::cerr << "!!! Compile log = " << log << std::endl;
    delete[] log;
    return;
}
The glShaderSource function takes a const char**, so take your char* and just use the & operator on it instead of trying to cast it.
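Concretely, with the question's variable names (a sketch; the intermediate const char* sidesteps the char** to const char** conversion that C++ forbids):
const char* src = vertex_shader;                // char* converts to const char* implicitly
glShaderSource( vertexShader, 1, &src, NULL );  // &src already has the right type, no cast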