Bind SDL2 texture to GLSL shader - opengl

I'm trying to bind an SDL2 texture to a GLSL shader, though I'm not entirely sure how. I'm using a library called glfx to handle the GLSL shaders, and I've been helping with the development of that library as well. I'm pretty sure I've got everything else right, but the program crashes when I call SDL_GL_BindTexture. Can anyone see what I've done wrong?
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <string>
#include <GL/glew.h>
#include <GL/glfx.h>
#include <SDL2/SDL.h>
#include <FreeImage.h>
int main()
{
SDL_Window *mainwindow;
SDL_Renderer *renderer;
SDL_GLContext maincontext;
SDL_Init( SDL_INIT_VIDEO );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 3 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 2 );
SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );
SDL_GL_SetAttribute( SDL_GL_DEPTH_SIZE, 24 );
SDL_CreateWindowAndRenderer( 512, 512, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN, &mainwindow, &renderer );
maincontext = SDL_GL_CreateContext( mainwindow );
glewExperimental = GL_TRUE;
glewInit( );
fprintf( stdout, "%s\n", glGetString(GL_VERSION) );
fprintf( stdout, "%s\n", glGetString(GL_SHADING_LANGUAGE_VERSION) );
FIBITMAP* dib = FreeImage_Load( FIF_PNG, "test.png" );
uint32_t w = FreeImage_GetWidth( dib );
uint32_t h = FreeImage_GetHeight( dib );
dib = FreeImage_ConvertTo32Bits( dib );
BYTE* pixeles = FreeImage_GetBits( dib );
GLubyte* textura = new GLubyte[4*w*h];
SDL_Texture* texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_STATIC, w, h );
const SDL_Rect rect = { 0, 0, w, h };
int pitch = 32;
SDL_LockTexture( texture, &rect, (void**)&textura, &pitch );
for( uint32_t j = 0; j < w * h; j++ )
{
textura[j*4+0] = pixeles[j*4+2];
textura[j*4+1] = pixeles[j*4+1];
textura[j*4+2] = pixeles[j*4+0];
textura[j*4+3] = pixeles[j*4+3];
}
SDL_UnlockTexture( texture );
FreeImage_Unload( dib );
delete [] textura;
int effect = glfxGenEffect( );
std::string shader;
shader ="struct VSinput\n"
"{\n"
" vec3 Position;\n"
"};\n"
"shader VSmain(in VSinput VSin, out vec2 TexCoord)\n"
"{\n"
" gl_Position = vec4(VSin.Position, 1.0);\n"
" TexCoord = vec2( 0.8, 0.8 );\n"
"};\n"
"uniform sampler2D gColorMap;\n"
"shader FSmain(in vec2 TexCoord, out vec4 FragColor)\n"
"{\n"
" FragColor = texture(gColorMap, TexCoord);\n"
"}\n"
"program SimpleTechnique\n"
"{\n"
" vs(150) = VSmain();\n"
" fs(150) = FSmain();\n"
"};\0";
glfxParseEffectFromMemory( effect, shader.c_str() );
int shaderProg = glfxCompileProgram( effect, "SimpleTechnique" );
if (shaderProg < 0)
{
std::string log = glfxGetEffectLog(effect);
fprintf( stderr, "%s\n", log.c_str() );
}
glClearColor ( 0.0, 0.0, 1.0, 1.0 );
glClear ( GL_COLOR_BUFFER_BIT );
float* vert = new float[9];
vert[0] = 0.0; vert[1] = 0.5; vert[2] =-1.0;
vert[3] =-1.0; vert[4] =-0.5; vert[5] =-1.0;
vert[6] = 1.0; vert[7] =-0.5; vert[8] =-1.0;
unsigned int m_vaoID;
unsigned int m_vboID;
glGenVertexArrays( 1, &m_vaoID );
glBindVertexArray( m_vaoID );
glGenBuffers( 1, &m_vboID );
glBindBuffer( GL_ARRAY_BUFFER, m_vboID );
glBufferData( GL_ARRAY_BUFFER, 9 * sizeof(GLfloat), vert, GL_STATIC_DRAW );
glVertexAttribPointer( 0, 3, GL_FLOAT, GL_FALSE, 0, 0 );
glEnableVertexAttribArray( 0 );
glEnable( GL_TEXTURE_2D );
int loc = glGetUniformLocation( shaderProg, "gColorMap" );
glActiveTexture( GL_TEXTURE0 );
SDL_GL_BindTexture(texture, NULL, NULL );
glUniform1i( loc, 0 );
glUseProgram( shaderProg );
glDrawArrays( GL_TRIANGLES, 0, 3 );
glDisableVertexAttribArray( 0 );
glBindVertexArray( 0 );
delete[] vert;
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glDeleteBuffers( 1, &m_vboID );
glDeleteVertexArrays( 1, &m_vaoID );
SDL_GL_SwapWindow( mainwindow );
SDL_Delay( 2000 );
SDL_GL_DeleteContext( maincontext );
SDL_DestroyWindow( mainwindow );
SDL_Quit( );
return 0;
}

From the glUniform documentation: "Specify the value of a uniform variable for the current program object."
So call glUseProgram() first, then glUniform1i(), not the other way around.
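A minimal sketch of the corrected ordering, using the names from the code above (the SDL_GL_BindTexture crash itself is the separate SDL2 bug mentioned below):
glUseProgram( shaderProg );                  // make the program current first
int loc = glGetUniformLocation( shaderProg, "gColorMap" );
glActiveTexture( GL_TEXTURE0 );
SDL_GL_BindTexture( texture, NULL, NULL );
glUniform1i( loc, 0 );                       // now the sampler uniform is set on shaderProg
glDrawArrays( GL_TRIANGLES, 0, 3 );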
EDIT: This is looking like a bug in SDL2. You might try the demo program I attached to the report and see if you can repro on your system.
EDIT2: Looks like Sam has a fix in already.

Related

Can't render to GtkGLArea

I'm trying to render a triangle to a GtkGLArea, but I only see the color I cleared the frame with via glClearColor().
Please note:
I know that the triangle is so big that it would fill the whole screen, but I also tried smaller ones and they didn't work either.
I also know that I normally shouldn't create the program before every render; I only did it here to keep the example short.
I'm fairly certain that the error is neither in LoadShaders nor in the shaders themselves, because I've tried the exact same functions with GLFW and they've worked fine there.
Things which might cause the problem:
I'm currently not flushing the frame or swapping framebuffers, because the documentation (https://developer.gnome.org/gtk3/stable/GtkGLArea.html) doesn't mention that I have to. I've tried glFlush() but it didn't help either.
I assume that the screen coordinates go from -1 to 1 on all axes, like in normal OpenGL. Maybe that's wrong, but I couldn't find anything in the documentation there either.
Could somebody help me?
This is how I compile it:
g++ -O3 -s -o main main.cpp -isystem include -Llibs -DNDEBUG `pkg-config --cflags gtk+-3.0` `pkg-config --libs gtk+-3.0` -lepoxy -lm
This is my code:
#include <gtk/gtk.h>
#include <epoxy/gl.h>
#include <epoxy/glx.h>
#include <iostream>
#include <vector>
GLuint LoadShaders(char const* vertex, char const* fragment){
// Create the shaders
GLuint VertexShaderID = glCreateShader(GL_VERTEX_SHADER);
GLuint FragmentShaderID = glCreateShader(GL_FRAGMENT_SHADER);
GLint Result = GL_FALSE;
int InfoLogLength;
// Compile Vertex Shader
glShaderSource(VertexShaderID, 1, &vertex , NULL);
glCompileShader(VertexShaderID);
// Check Vertex Shader
glGetShaderiv(VertexShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(VertexShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> VertexShaderErrorMessage(InfoLogLength+1);
glGetShaderInfoLog(VertexShaderID, InfoLogLength, NULL, &VertexShaderErrorMessage[0]);
printf("%s\n", &VertexShaderErrorMessage[0]);
}
// Compile Fragment Shader
glShaderSource(FragmentShaderID, 1, &fragment , NULL);
glCompileShader(FragmentShaderID);
// Check Fragment Shader
glGetShaderiv(FragmentShaderID, GL_COMPILE_STATUS, &Result);
glGetShaderiv(FragmentShaderID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> FragmentShaderErrorMessage(InfoLogLength+1);
glGetShaderInfoLog(FragmentShaderID, InfoLogLength, NULL, &FragmentShaderErrorMessage[0]);
printf("%s\n", &FragmentShaderErrorMessage[0]);
}
// Link the program
GLuint ProgramID = glCreateProgram();
glAttachShader(ProgramID, VertexShaderID);
glAttachShader(ProgramID, FragmentShaderID);
glLinkProgram(ProgramID);
// Check the program
glGetProgramiv(ProgramID, GL_LINK_STATUS, &Result);
glGetProgramiv(ProgramID, GL_INFO_LOG_LENGTH, &InfoLogLength);
if ( InfoLogLength > 0 ){
std::vector<char> ProgramErrorMessage(InfoLogLength+1);
glGetProgramInfoLog(ProgramID, InfoLogLength, NULL, &ProgramErrorMessage[0]);
printf("%s\n", &ProgramErrorMessage[0]);
}
glDetachShader(ProgramID, VertexShaderID);
glDetachShader(ProgramID, FragmentShaderID);
glDeleteShader(VertexShaderID);
glDeleteShader(FragmentShaderID);
return ProgramID;
}
char const* vertShader = R"GLSL(
#version 330 core
void main(){
gl_Position.z = 0.0;
gl_Position.w = 1.0;
if (0 == gl_VertexID) {
gl_Position.x = -100.0;
gl_Position.y = -100.0;
}
if (2 == gl_VertexID) {
gl_Position.x = 0.0;
gl_Position.y = 100.0;
}
if (1 == gl_VertexID) {
gl_Position.x = 100.0;
gl_Position.y = -100.0;
}
}
)GLSL";
char const* fragShader = R"GLSL(
#version 330 core
layout(location = 0) out vec4 color;
void main(){
color = vec4(1.0, 0.0, 0.0, 1.0);
}
)GLSL";
gboolean
render(GtkGLArea*, GdkGLContext*, gpointer) {
glClearColor(0.5, 0.5, 0.5, 0);
glClear(GL_COLOR_BUFFER_BIT);
GLuint programID;
programID = LoadShaders(vertShader, fragShader);
glUseProgram(programID);
glDrawArrays(GL_TRIANGLES, 0, 3);
//glFlush();
glDeleteProgram(programID);
return TRUE;
}
int
main(int argc, char** argv) {
gtk_init(&argc, &argv);
auto window{gtk_window_new(GTK_WINDOW_TOPLEVEL)};
auto glWidget{gtk_gl_area_new()};
gtk_container_add(GTK_CONTAINER(window), glWidget);
g_signal_connect (glWidget, "render", G_CALLBACK(render), nullptr);
gtk_widget_show_all(window);
gtk_main();
return EXIT_SUCCESS;
}
Two things I can think of:
You aren't requesting a Core context from the OS. Looks like you have to override create-context and create + return a gdk_gl_context_set_required_version'd GdkGLContext (see the sketch after this list).
When you do get a Core context up & going I'm pretty sure you still need a VAO bound even if you're generating geometry entirely within your vertex shader.
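A rough, untested sketch of that create-context override (the handler name on_create_context is just illustrative; GTK 3.16+ / GDK API, requesting 3.3 here as an example):
static GdkGLContext*
on_create_context(GtkGLArea* area, gpointer)
{
    GError* error = nullptr;
    // create a context on the widget's GdkWindow and pin the GL version before realizing it
    GdkWindow* win = gtk_widget_get_window(GTK_WIDGET(area));
    GdkGLContext* context = gdk_window_create_gl_context(win, &error);
    if (error) { gtk_gl_area_set_error(area, error); return nullptr; }
    gdk_gl_context_set_required_version(context, 3, 3);
    gdk_gl_context_realize(context, &error);
    if (error) { gtk_gl_area_set_error(area, error); return nullptr; }
    return context;
}
// hook it up next to the existing "render" handler:
// g_signal_connect(glWidget, "create-context", G_CALLBACK(on_create_context), nullptr);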
RE: missing VAOs:
With this GLFW program and the VAO creation/bind commented out:
#include <glad/glad.h>
#define GLFW_INCLUDE_NONE
#include <GLFW/glfw3.h>
#include <iostream>
void CheckStatus( GLuint obj, bool isShader )
{
GLint status = GL_FALSE, log[ 1 << 11 ] = { 0 };
( isShader ? glGetShaderiv : glGetProgramiv )( obj, isShader ? GL_COMPILE_STATUS : GL_LINK_STATUS, &status );
( isShader ? glGetShaderInfoLog : glGetProgramInfoLog )( obj, sizeof( log ), NULL, (GLchar*)log );
if( status == GL_TRUE ) return;
std::cerr << (GLchar*)log << "\n";
std::exit( EXIT_FAILURE );
}
void AttachShader( GLuint program, GLenum type, const char* src )
{
GLuint shader = glCreateShader( type );
glShaderSource( shader, 1, &src, NULL );
glCompileShader( shader );
CheckStatus( shader, true );
glAttachShader( program, shader );
glDeleteShader( shader );
}
const char* vert = 1 + R"GLSL(
#version 330 core
void main(){
gl_Position.z = 0.0;
gl_Position.w = 1.0;
if (0 == gl_VertexID) {
gl_Position.x = -100.0;
gl_Position.y = -100.0;
}
if (2 == gl_VertexID) {
gl_Position.x = 0.0;
gl_Position.y = 100.0;
}
if (1 == gl_VertexID) {
gl_Position.x = 100.0;
gl_Position.y = -100.0;
}
}
)GLSL";
const char* frag = 1 + R"GLSL(
#version 330 core
layout(location = 0) out vec4 color;
void main(){
color = vec4(1.0, 0.0, 0.0, 1.0);
}
)GLSL";
int main( int, char** )
{
glfwSetErrorCallback( []( int, const char* desc ) { std::cerr << desc << "\n"; std::exit( EXIT_FAILURE ); } );
glfwInit();
glfwWindowHint( GLFW_CONTEXT_VERSION_MAJOR, 3 );
glfwWindowHint( GLFW_CONTEXT_VERSION_MINOR, 3 );
glfwWindowHint( GLFW_OPENGL_FORWARD_COMPAT, GLFW_TRUE );
glfwWindowHint( GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE );
GLFWwindow* window = glfwCreateWindow( 640, 480, "GLFW", NULL, NULL );
glfwMakeContextCurrent( window );
gladLoadGLLoader( (GLADloadproc)glfwGetProcAddress );
//GLuint vao = 0;
//glGenVertexArrays( 1, &vao );
//glBindVertexArray( vao );
GLuint prog = glCreateProgram();
AttachShader( prog, GL_VERTEX_SHADER, vert );
AttachShader( prog, GL_FRAGMENT_SHADER, frag );
glLinkProgram( prog );
CheckStatus( prog, false );
while( !glfwWindowShouldClose( window ) )
{
glfwPollEvents();
int w, h;
glfwGetFramebufferSize( window, &w, &h );
glViewport( 0, 0, w, h );
glClearColor( 0.5, 0.5, 0.5, 0 );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glUseProgram( prog );
glDrawArrays( GL_TRIANGLES, 0, 3 );
glfwSwapBuffers( window );
}
glfwTerminate();
}
Running on Linux with Mesa 13.0.6's llvmpipe backend & the MESA_DEBUG=1 envvar gives me a grey window and this message on stdout:
Mesa: User error: GL_INVALID_OPERATION in glDrawArrays(no VAO bound)
Restoring the VAO gives the expected red window.

How to draw points efficiently

My program receives a PCL point cloud and plots each point one by one using:
glBegin(GL_POINTS);
glVertex3f(point.x, point.y, point.z);
glEnd();
It works, but due to the large number of points the program is pretty slow. Is there a more efficient way to do this?
Jam all the points into a big VBO when the point-cloud changes & draw 'em all in one go using a single glDrawArrays() call. That way OpenGL can shift all the vertex data to the GPU once instead of you spoon-feeding the driver geometry one glVertex() at a time every frame.
Heck, even vertex arrays will buy you a huge speed-up by avoiding hundreds of thousands of function-calls into the GL driver.
EDIT: Comparison with 10 million random points, drawn using vertex buffer objects, vertex arrays, display lists, and immediate mode (frame-time screenshots omitted).
Code (hit 'n' to cycle between drawing methods):
// http://glew.sourceforge.net/
#include <GL/glew.h>
// http://freeglut.sourceforge.net/
#include <GL/freeglut.h>
// http://glm.g-truc.net/
#include <glm/glm.hpp>
#include <glm/gtc/random.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <vector>
#include <sstream>
#include <chrono>
#include <cstddef>
struct Vertex
{
glm::vec4 pos;
glm::vec4 color;
};
std::vector< Vertex > verts;
GLuint vbo = 0;
GLuint dlist = 0;
void init()
{
// init geometry
for( size_t i = 0; i < 10000000; i++ )
{
Vertex vert;
vert.pos = glm::vec4( glm::linearRand( glm::vec3( -1.0f, -1.0f, -1.0f ), glm::vec3( 1.0f, 1.0f, 1.0f ) ), 1.0f );
vert.color = glm::vec4( glm::linearRand( glm::vec3( 0.00f, 0.0f, 0.0f ), glm::vec3( 1.0f, 1.0f, 1.0f ) ), 1.0f );
verts.push_back( vert );
}
// create display list
dlist = glGenLists( 1 );
glNewList( dlist, GL_COMPILE );
glBegin( GL_POINTS );
for( size_t i = 0; i < verts.size(); ++i )
{
glColor4fv( glm::value_ptr( verts[i].color) );
glVertex4fv( glm::value_ptr( verts[i].pos) );
}
glEnd();
glEndList();
// create VBO
glGenBuffers( 1, &vbo );
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glBufferData( GL_ARRAY_BUFFER, sizeof( Vertex ) * verts.size(), verts.data(), GL_STATIC_DRAW );
}
unsigned int method = 0;
void keyboard( unsigned char key, int x, int y )
{
if( 'n' == key )
{
method++;
if( method > 3 ) method = 0;
}
}
void display()
{
// timekeeping
static std::chrono::steady_clock::time_point prv = std::chrono::steady_clock::now();
std::chrono::steady_clock::time_point cur = std::chrono::steady_clock::now();
const float dt = std::chrono::duration< float >( cur - prv ).count();
prv = cur;
glClearColor( 0, 0, 0, 1 );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
double w = glutGet( GLUT_WINDOW_WIDTH );
double h = glutGet( GLUT_WINDOW_HEIGHT );
gluPerspective( 60.0, w / h, 0.1, 10.0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
gluLookAt( 2, 2, 2, 0, 0, 0, 0, 0, 1 );
static float angle = 0.0f;
angle += dt * 6.0f;
glRotatef( angle, 0, 0, 1 );
// render
switch( method )
{
case 0:
// VBO
glBindBuffer( GL_ARRAY_BUFFER, vbo );
glEnableClientState( GL_VERTEX_ARRAY );
glEnableClientState( GL_COLOR_ARRAY );
glVertexPointer( 4, GL_FLOAT, sizeof( Vertex ), (void*)offsetof( Vertex, pos ) );
glColorPointer( 4, GL_FLOAT, sizeof( Vertex ), (void*)offsetof( Vertex, color ) );
glDrawArrays( GL_POINTS, 0, verts.size() );
glDisableClientState( GL_VERTEX_ARRAY );
glDisableClientState( GL_COLOR_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
break;
case 1:
// vertex array
glEnableClientState( GL_VERTEX_ARRAY );
glEnableClientState( GL_COLOR_ARRAY );
glVertexPointer( 4, GL_FLOAT, sizeof( Vertex ), &verts[0].pos );
glColorPointer( 4, GL_FLOAT, sizeof( Vertex ), &verts[0].color );
glDrawArrays( GL_POINTS, 0, verts.size() );
glDisableClientState( GL_VERTEX_ARRAY );
glDisableClientState( GL_COLOR_ARRAY );
break;
case 2:
// display list
glCallList( dlist );
break;
case 3:
// immediate mode
glBegin( GL_POINTS );
for( size_t i = 0; i < verts.size(); ++i )
{
glColor4fv( glm::value_ptr( verts[i].color) );
glVertex4fv( glm::value_ptr( verts[i].pos) );
}
glEnd();
break;
}
// info/frame time output
std::stringstream msg;
msg << "Using ";
switch( method )
{
case 0: msg << "vertex buffer object"; break;
case 1: msg << "vertex array"; break;
case 2: msg << "display list"; break;
case 3: msg << "immediate mode"; break;
}
msg << std::endl;
msg << "Frame time: " << (dt * 1000.0f) << " ms";
glColor3ub( 255, 255, 0 );
glWindowPos2i( 10, 25 );
glutBitmapString( GLUT_BITMAP_9_BY_15, (unsigned const char*)( msg.str().c_str() ) );
glutSwapBuffers();
}
int main(int argc, char **argv)
{
glutInit( &argc, argv );
glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE );
glutInitWindowSize( 600, 600 );
glutCreateWindow( "GLUT" );
glewInit();
init();
glutDisplayFunc( display );
glutKeyboardFunc( keyboard );
glutIdleFunc( display );
glutMainLoop();
return 0;
}
Yes, definitely: the code you are showing is from quite an old version of OpenGL.
In more recent versions you can pack your data together and send it to the GPU in one call. The code becomes a little more complex, but it is worth it.
I suggest you look at this website: https://learnopengl.com/
It gathers everything you need to start using modern OpenGL.
Hope it helps.
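As a rough illustration of that idea (not the answerer's code; it assumes a core-profile context and a shader program with the position attribute at location 0 are already set up), packing the whole cloud into one buffer and drawing it with a single call might look like this:
#include <GL/glew.h>
#include <vector>

struct Point { float x, y, z; };

// Upload the whole cloud once; returns a VAO that can be drawn every frame.
GLuint uploadPointCloud(const std::vector<Point>& cloud)
{
    GLuint vao = 0, vbo = 0;
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    // one upload for the whole cloud instead of one glVertex3f() per point per frame
    glBufferData(GL_ARRAY_BUFFER, cloud.size() * sizeof(Point), cloud.data(), GL_STATIC_DRAW);
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(Point), (void*)0);
    glEnableVertexAttribArray(0);
    glBindVertexArray(0);
    return vao;
}

// per frame: glBindVertexArray(vao); glDrawArrays(GL_POINTS, 0, (GLsizei)cloud.size());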

OpenGL triangle not appearing

Hello everyone, I'm trying to learn OpenGL using shaders, so I made a triangle that changes its color while it moves. For some reason the triangle does not appear in the window, just a black background; it doesn't crash or show any kind of error. I believe the library setup is OK, since I've used these libraries before.
I'm using CLion on Ubuntu 16.04. Here is my code, so hopefully you can check it out and give me a hand. Thanks!
#define GLEW_STATIC
#include <stdlib.h>
#include <stdio.h>
#include <GL/glew.h>
#include <GL/freeglut.h>
#include <iostream>
int CurrentWidth = 800,
CurrentHeight = 600,
WindowHandle = 0;
GLuint
VertexShaderId,
FragmentShaderId,
ProgramId;
GLuint s_vertexLoc, s_colorLoc , s_factorLoc;
GLfloat runner = 0.0;
const GLchar* VertexShader =
{
"#version 150\n"
"attribute vec3 in_vertex;"
"attribute vec3 in_color;"
"uniform float factor;"
"varying vec3 intp_color;"
"void main(void)"
"{"
"intp_color = in_color;"
"gl_Position = vec4( in_vertex , 1.0 ) ;"
"gl_Position.y += factor; "
"}"
};
//Telling every single pixel is going to be red
const GLchar* FragmentShader =
{
"#version 150\n"
"uniform float factor;"
"varying vec3 intp_color;"
"void main(void){"
"gl_FragColor = vec4( intp_color , 1.0) * factor;"
"}"
};
// for. dec.
void ResizeFunction(int, int);
void RenderFunction(void);
void IdleFunction(void);
void CreateShaders(void);
// set up an array for the geometry of the object
GLfloat Vertices[] = {
-0.5f , -0.2f , 0.0f, // point A - x , y , z
0.5f , -0.2f , 0.0f, // point B - x , y , z
0.0f , 0.8f , 0.0f // point C - x , y , z
};
GLfloat Colors[] = {
1.0f , 0.0f , 0.0f, // point A - x , y , z
0.0f , 1.0f , 0.0f, // point B - x , y , z
0.0f , 0.0f , 1.0f // point C - x , y , z
};
int main( int argc , char* argv[] )
{
glutInit( &argc , argv);
//Target version 3.1
glutInitContextVersion(3 , 1 );
glutInitWindowSize(CurrentWidth, CurrentHeight);
glutInitDisplayMode( GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA );
WindowHandle = glutCreateWindow( "OpenGL - Shader example" );
glutReshapeFunc( ResizeFunction );
glutDisplayFunc( RenderFunction );
glutIdleFunc( IdleFunction );
/// init GLEW
GLenum GlewInitResult;
GlewInitResult = glewInit();
if (GLEW_OK != GlewInitResult)
exit(EXIT_FAILURE);
/// Create our shaders
CreateShaders();
glutMainLoop();
exit(EXIT_SUCCESS);
}
void ResizeFunction(int Width, int Height)
{
CurrentWidth = Width;
CurrentHeight = Height;
glViewport( 0, 0, CurrentWidth, CurrentHeight );
glClearColor( 0.0f, 0.0f, 0.0f, 0.0f );
}
void RenderFunction(void)
{
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
//We enable the shader variable
glEnableVertexAttribArray( s_vertexLoc );
glEnableVertexAttribArray( s_colorLoc );
//How to send data to the variable:
//( Where to send the data , how its grouped , data type , dont normalize the data,
// there is no offset, you find it here)
glVertexAttribPointer( s_vertexLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Vertices );
glVertexAttribPointer ( s_colorLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Colors);
glUniform1f( s_factorLoc , runner );
runner += 0.01;
if(runner > 1.0f)
runner = -1.0f;
glDrawArrays( GL_TRIANGLES , 0 , 3 );
glDisableVertexAttribArray( s_vertexLoc );
glDisableVertexAttribArray( s_colorLoc );
glutSwapBuffers();
}
void IdleFunction(void)
{
glutPostRedisplay();
}
//Error checking
void printLog(GLuint obj)
{
int infologLength = 0;
int maxLength;
if( glIsShader( obj ) )
glGetShaderiv( obj , GL_INFO_LOG_LENGTH , &maxLength );
else
glGetProgramiv( obj, GL_INFO_LOG_LENGTH, &maxLength);
char infoLog[1255];
if ( glIsShader(obj) )
glGetShaderInfoLog( obj, maxLength, &infologLength, infoLog );
else
glGetProgramInfoLog( obj, maxLength, &infologLength, infoLog );
if ( infologLength > 0 )
printf( "\n Error detail: %s\n" , infoLog );
}
void CreateShaders(void)
{
GLenum ErrorCheckValue = glGetError();
if( glCreateShader )
printf(" ---- shader suppot ok ---");
else
{
printf(" ---- no shader support ---");
return ;
}
///The VERTEX shader is created (tell it what it is)
VertexShaderId = glCreateShader( GL_VERTEX_SHADER );
//(shader we want to set the source, how many are they, the source of the shader , )
glShaderSource( VertexShaderId , 1 , &VertexShader , nullptr );
//Compile the shader
glCompileShader( VertexShaderId );
//Error checking
printLog( VertexShaderId );
///The FRAGMENT shader is created (tell it what it is)
FragmentShaderId = glCreateShader( GL_FRAGMENT_SHADER );
//(shader we want to set the source, how many are they, the source of the shader , )
glShaderSource( FragmentShaderId , 1 , &FragmentShader , nullptr );
//Compile the shader
glCompileShader( FragmentShaderId );
//Error checking
printLog( FragmentShaderId );
///Program Object links both shaders
ProgramId = glCreateProgram();
//Attach the compiled shaders to the program
glAttachShader( ProgramId , VertexShaderId );
glAttachShader( ProgramId , FragmentShaderId );
//Links the shaders to the program
glLinkProgram( ProgramId );
//Error checking
printLog( ProgramId );
//Uses the program to render
glUseProgram( ProgramId );
//( check this program , for this variable and return it)
s_vertexLoc = glGetAttribLocation( ProgramId , "in_vertex" );
s_colorLoc = glGetAttribLocation( ProgramId , "in_color" );
s_factorLoc = glGetUniformLocation( ProgramId , "factor" );
}
You have several issues.
First, using OpenGL 3.1 (GLSL 1.40) is discouraged. Try at least 3.2 (GLSL 1.50) and ask for the Core Profile in your GLUT initialization. Then replace "attribute" with "in" in your vertex shader, and "varying" with "in" or "out" depending on whether the variable is used as an input or an output.
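For instance, the vertex shader interface would look roughly like this after the conversion (an untested sketch of your own shader; the fragment shader would likewise take intp_color as an "in" and write to a declared "out" instead of gl_FragColor):
#version 150
in vec3 in_vertex;
in vec3 in_color;
uniform float factor;
out vec3 intp_color;
void main(void)
{
    intp_color = in_color;
    gl_Position = vec4(in_vertex, 1.0);
    gl_Position.y += factor;
}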
Second, this:
//How to send data to the variable:
//( Where to send the data , how its grouped , data type , dont normalize the data,
// there is no offset, you find it here)
glVertexAttribPointer( s_vertexLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Vertices );
glVertexAttribPointer ( s_colorLoc , 3 , GL_FLOAT , GL_FALSE , 0 , Colors);
is not right. That last argument is a byte offset into the currently bound buffer object's data store, not a pointer to the data itself.
Third, you need a buffer object for your vertex coordinates. Use
glGenBuffers(), glBufferData(), and glBindBuffer().
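Something along these lines (an untested sketch using your existing arrays and attribute locations), done once at init time rather than every frame:
GLuint vbo[2];
glGenBuffers(2, vbo);

glBindBuffer(GL_ARRAY_BUFFER, vbo[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Vertices), Vertices, GL_STATIC_DRAW);
glVertexAttribPointer(s_vertexLoc, 3, GL_FLOAT, GL_FALSE, 0, (void*)0); // offset 0 into the bound VBO
glEnableVertexAttribArray(s_vertexLoc);

glBindBuffer(GL_ARRAY_BUFFER, vbo[1]);
glBufferData(GL_ARRAY_BUFFER, sizeof(Colors), Colors, GL_STATIC_DRAW);
glVertexAttribPointer(s_colorLoc, 3, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(s_colorLoc);
// (a core profile context additionally needs a VAO generated and bound before this)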
In your place, I'd follow a tutorial on "modern OpenGL". Search the web; there are plenty.

Why won't this simple OpenGL ES 2.0/SDL 2 program let me change my point sprite size?

I was working on a simple OpenGL ES 2.0 program (along with SDL 2 to make things a bit easier) and decided to try out point sprites. I was able to get them to draw successfully, but I wasn't able to change their size by outputting gl_PointSize from the vertex shader. Theoretically, that should be all that I have to do.
The following code snippet is a very stripped-down version of my barely-C++ code (no error checking at all, because nothing should go wrong with it) that demonstrates how I am trying to change the size of my point sprites. It has been tested on two rather different computers with similar results (Linux, but 32-bit/software rendering vs 64-bit/discrete GPU), and may be compiled with g++ main.cpp -lSDL2 -Wall -D_REENTRANT -lGLESv2.
#include <GLES2/gl2.h>
#include <SDL2/SDL.h>
struct myData {
SDL_Window *window;
SDL_GLContext context;
};
const GLchar vertex[] =
"#version 100\n"
"precision mediump float;\n"
"void main()\n"
"{\n"
" gl_Position = vec4(0.0, 0.0, 0.0, 1.0);\n"
" gl_PointSize = 128.0;\n"
"}\0";
const GLchar fragment[] =
"#version 100\n"
"precision mediump float;\n"
"void main()\n"
"{\n"
" gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n"
"}\0";
GLuint loadShader(GLuint program, GLenum type, const GLchar *shaderSrc) {
GLuint shader;
shader = glCreateShader(type);
glShaderSource(shader, 1, &shaderSrc, NULL);
glCompileShader(shader);
glAttachShader(program, shader);
return 0;
}
int sdlInit(myData *data) {
SDL_Init(SDL_INIT_VIDEO);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
data->window = SDL_CreateWindow("Demo", 0, 0, 512, 512, SDL_WINDOW_OPENGL | SDL_WINDOW_SHOWN);
data->context = SDL_GL_CreateContext(data->window);
return 0;
}
int glInit(myData *data) {
GLuint programObject;
programObject = glCreateProgram();
loadShader(programObject, GL_VERTEX_SHADER, vertex);
loadShader(programObject, GL_FRAGMENT_SHADER, fragment);
glLinkProgram(programObject);
glUseProgram(programObject);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glViewport(0, 0, 512, 512);
return 0;
}
int loopFunc(myData *data) {
SDL_Event event;
while (SDL_PollEvent(&event)) {
if (event.type == SDL_QUIT) {
return 1;
}
}
glClear(GL_COLOR_BUFFER_BIT);
glDrawArrays(GL_POINTS, 0, 1);
SDL_GL_SwapWindow(data->window);
return 0;
}
void sdlQuit(myData *data) {
SDL_GL_DeleteContext(data->context);
SDL_DestroyWindow(data->window);
SDL_Quit();
return;
}
int main() {
myData data;
sdlInit(&data);
glInit(&data);
while (!loopFunc(&data));
sdlQuit(&data);
return 0;
}
When run, the program should produce a point sprite with a size of 128 pixels, per the value that I set in the vertex shader. However, when actually executed, the point sprite in the center of the window is exactly one pixel in size. What am I doing wrong?
I think you forgot to initialize the OpenGL ES 2 context:
SDL_Init(SDL_INIT_VIDEO);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MAJOR_VERSION, 2);
SDL_GL_SetAttribute(SDL_GL_CONTEXT_MINOR_VERSION, 0);
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1);
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, 24);
A couple things:
As jumapico pointed out, make sure you actually request an ES 2.0 context from SDL; otherwise you might get a regular desktop OpenGL context that happens to accept OpenGL ES's #version 100 GLSL code, and desktop OpenGL requires that you turn on gl_PointSize via glEnable(GL_PROGRAM_POINT_SIZE).
Make sure your GL implementation actually supports a gl_PointSize of 128.0 by checking GL_ALIASED_POINT_SIZE_RANGE; the spec only requires implementations to support a range of 1.0 to 1.0, so anything above that is optional.
Workin' fine on this Debian Buster box:
SDL/OpenGL ES info:
SDL compiled version: 2.0.9
SDL linked version : 2.0.9
GL_VENDOR : X.Org
GL_RENDERER : AMD Radeon (TM) R9 Fury Series (FIJI, DRM 3.27.0, 4.19.0-2-amd64, LLVM 7.0.1)
GL_VERSION : OpenGL ES 3.2 Mesa 18.3.4
GLSL version: OpenGL ES GLSL ES 3.20
gl_PointSize min: 1
gl_PointSize max: 2048
Code:
// g++ `pkg-config --cflags sdl2 glesv2` main.cpp `pkg-config --libs sdl2 glesv2`
#include <SDL.h>
#include <SDL_opengles2.h>
#include <iostream>
void CheckStatus( GLuint obj, bool isShader )
{
GLint status = GL_FALSE, log[ 1 << 11 ] = { 0 };
( isShader ? glGetShaderiv : glGetProgramiv )( obj, isShader ? GL_COMPILE_STATUS : GL_LINK_STATUS, &status );
if( status == GL_TRUE ) return;
( isShader ? glGetShaderInfoLog : glGetProgramInfoLog )( obj, sizeof( log ), NULL, (GLchar*)log );
std::cerr << (GLchar*)log << "\n";
std::exit( EXIT_FAILURE );
}
void AttachShader( GLuint program, GLenum type, const char* src )
{
GLuint shader = glCreateShader( type );
glShaderSource( shader, 1, &src, NULL );
glCompileShader( shader );
CheckStatus( shader, true );
glAttachShader( program, shader );
glDeleteShader( shader );
}
const char* const vert = 1 + R"GLSL(
#version 100
precision mediump float;
void main()
{
gl_Position = vec4( 0.0, 0.0, 0.0, 1.0 );
gl_PointSize = 128.0;
}
)GLSL";
const char* const frag = 1 + R"GLSL(
#version 100
precision mediump float;
void main()
{
gl_FragColor = vec4( 1.0, 0.0, 0.0, 1.0 );
}
)GLSL";
int main( int argc, char** argv )
{
SDL_Init( SDL_INIT_VIDEO );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_PROFILE_MASK, SDL_GL_CONTEXT_PROFILE_ES );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MAJOR_VERSION, 2 );
SDL_GL_SetAttribute( SDL_GL_CONTEXT_MINOR_VERSION, 0 );
SDL_GL_SetAttribute( SDL_GL_DOUBLEBUFFER, 1 );
SDL_GL_SetAttribute( SDL_GL_DEPTH_SIZE, 24 );
SDL_Window *window = SDL_CreateWindow( "SDL2", 0, 0, 640, 480, SDL_WINDOW_OPENGL );
SDL_GLContext context = SDL_GL_CreateContext( window );
// SDL info
SDL_version compiled;
SDL_version linked;
SDL_VERSION( &compiled );
SDL_GetVersion( &linked );
std::cout << "SDL compiled version: " << (int)compiled.major << "." << (int)compiled.minor << "." << (int)compiled.patch << "\n";
std::cout << "SDL linked version : " << (int)linked.major << "." << (int)linked.minor << "." << (int)linked.patch << "\n";
// GL info
std::cout << "GL_VENDOR : " << glGetString( GL_VENDOR ) << "\n";
std::cout << "GL_RENDERER : " << glGetString( GL_RENDERER ) << "\n";
std::cout << "GL_VERSION : " << glGetString( GL_VERSION ) << "\n";
std::cout << "GLSL version: " << glGetString( GL_SHADING_LANGUAGE_VERSION ) << "\n";
float pointSizeRange[2] = { -1.0, -1.0 };
glGetFloatv( GL_ALIASED_POINT_SIZE_RANGE, pointSizeRange );
std::cout << "gl_PointSize min: " << pointSizeRange[0] << "\n";
std::cout << "gl_PointSize max: " << pointSizeRange[1] << "\n";
GLuint prog = glCreateProgram();
AttachShader( prog, GL_VERTEX_SHADER, vert );
AttachShader( prog, GL_FRAGMENT_SHADER, frag );
glLinkProgram( prog );
CheckStatus( prog, false );
glUseProgram( prog );
bool running = true;
while( running )
{
SDL_Event ev;
while( SDL_PollEvent( &ev ) )
{
if( ev.type == SDL_QUIT )
{
running = false;
}
}
glClearColor( 0.0f, 0.0f, 0.0f, 0.0f );
glClear( GL_COLOR_BUFFER_BIT );
glDrawArrays( GL_POINTS, 0, 1 );
SDL_GL_SwapWindow( window );
}
SDL_GL_DeleteContext( context );
SDL_DestroyWindow( window );
SDL_Quit();
return EXIT_SUCCESS;
}

Drawing With a Shader Storage Object Not Working

With all of my objects that are to be rendered, I use glDrawElements. However, my venture into Compute Shaders has left me with a setup that uses glDrawArrays. As with many who are broaching the topic, I used this PDF as a basis. The problem is that when it is rendered, nothing appears.
#include "LogoTail.h"
LogoTail::LogoTail(int tag1) {
tag = tag1;
needLoad = false;
shader = LoadShaders("vertex-shader[LogoTail].txt","fragment-shader[LogoTail].txt");
shaderCompute = LoadShaders("compute-shader[LogoTail].txt");
for( int i = 0; i < NUM_PARTICLES; i++ )
{
points[ i ].x = 0.0f;
points[ i ].y = 0.0f;
points[ i ].z = 0.0f;
points[ i ].w = 1.0f;
}
glGenBuffers( 1, &posSSbo);
glBindBuffer( GL_SHADER_STORAGE_BUFFER, posSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(points), points, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
for( int i = 0; i < NUM_PARTICLES; i++ )
{
times[ i ].x = 0.0f;
}
glGenBuffers( 1, &birthSSbo);
glBindBuffer( GL_SHADER_STORAGE_BUFFER, birthSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(times), times, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
for( int i = 0; i < NUM_PARTICLES; i++ )
{
vels[ i ].vx = 0.0f;
vels[ i ].vy = 0.0f;
vels[ i ].vz = 0.0f;
vels[ i ].vw = 0.0f;
}
glGenBuffers( 1, &velSSbo );
glBindBuffer( GL_SHADER_STORAGE_BUFFER, velSSbo );
glBufferData( GL_SHADER_STORAGE_BUFFER, sizeof(vels), vels, GL_STATIC_DRAW );
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
}
void LogoTail::Update(const double dt, float sunTime,glm::vec3 sunN) {
position=glm::translate(glm::mat4(), glm::vec3(4.5f,0,0));
}
void LogoTail::Draw(shading::Camera& camera){
shaderCompute->use();
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 4, posSSbo );
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 5, velSSbo );
glBindBufferBase( GL_SHADER_STORAGE_BUFFER, 6, birthSSbo );
glDispatchCompute( NUM_PARTICLES / WORK_GROUP_SIZE, 1, 1 );
glMemoryBarrier( GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT );
shaderCompute->stopUsing();
shader->use();
shader->setUniform("camera", camera.matrix());
shader->setUniform("model",position);
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );
glVertexPointer( 4, GL_FLOAT, 0, (void *)0 );
glEnableClientState( GL_VERTEX_ARRAY );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableClientState( GL_VERTEX_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
shader->stopUsing();
}
The header contains the needed structures and other variables so they do not fall out of scope for the specific object.
Here is the compute shader itself.
#version 430 core
#extension GL_ARB_compute_shader : enable
#extension GL_ARB_shader_storage_buffer_object : enable
layout( std140, binding=4 ) buffer Pos
{
vec4 Positions[ ]; // array of vec4 structures
};
layout( std140, binding=5 ) buffer Vel
{
vec4 Velocities[ ]; // array of vec4 structures
};
layout( std140, binding=6 ) buffer Tim
{
float BirthTimes[ ]; // array of structures
};
layout( local_size_x = 128, local_size_y = 1, local_size_z = 1 ) in;
const vec3 G = vec3( 0., -0.2, 0. );
const float DT = 0.016666;
void main() {
uint gid = gl_GlobalInvocationID.x; // the .y and .z are both 1
vec3 p = Positions[ gid ].xyz;
vec3 v = Velocities[ gid ].xyz;
vec3 pp = p + v*DT + .5*DT*DT*G;
vec3 vp = v + G*DT;
Positions[ gid ].xyz = pp;
Velocities[ gid ].xyz = vp;
}
For testing purposes I lowered the gravity.
I believe that nothing is out of scope, nor is there a missing bind, yet it eludes me why the particles are not drawing.
In addition, I also added a geometry shader that constructs a quad around each point, but it did not solve anything.
The last 5 lines seem problematic to me:
glBindBuffer( GL_ARRAY_BUFFER, posSSbo );
glVertexPointer( 4, GL_FLOAT, 0, (void *)0 );
glEnableClientState( GL_VERTEX_ARRAY );
glDrawArrays( GL_POINTS, 0, NUM_PARTICLES );
glDisableClientState( GL_VERTEX_ARRAY );
glBindBuffer( GL_ARRAY_BUFFER, 0 );
My guess is you are trying to use the old way of doing things in the programmable pipeline. I am not sure how it is stated in the OpenGL specs, but it seems that in the newer versions (GL 4.2) you are forced to bind your vertex buffers to a VAO (maybe that is a vendor-specific rule?). Once I needed to implement OIT and tried Cyril Crassin's demo, which was using buffers with an elements draw, just like you. I am using GL 4.2 and NVIDIA cards, and nothing was showing up. I then bound the buffers to a VAO and the issue was gone. So that is what I suggest you try.
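For example, a rough, untested sketch of the draw path using a VAO and a generic vertex attribute instead of the client-state calls (this assumes the render shader's position input is at attribute location 0):
// once, at init time
GLuint vao = 0;
glGenVertexArrays(1, &vao);
glBindVertexArray(vao);
glBindBuffer(GL_ARRAY_BUFFER, posSSbo);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, (void*)0);
glEnableVertexAttribArray(0);
glBindVertexArray(0);

// in Draw(), replacing the glVertexPointer/glEnableClientState block
glBindVertexArray(vao);
glDrawArrays(GL_POINTS, 0, NUM_PARTICLES);
glBindVertexArray(0);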