I am currently trying to do image processing using OpenGL ES. I want to apply basic image effects such as blurring, switching color spaces, and so on.
I want to build the simplest program to do the following things:
Image loading
Image processing (using shader)
Image saving
I have managed to build the following setup:
An OpenGL context
The image I want to do effects on loaded using DevIL.
Two shaders (one vertex shader and one fragment shader)
I am now stuck on using the loaded image to send data to the fragment shader. What I am trying to do is send the image as a sampler2D to the fragment shader and apply processing to it.
I have multiple questions such as:
Do I need a vertex shader if all I want to do is pure 2D image processing?
If I do, what should be done in this vertex shader, as I have no vertices at all? Should I create quad vertices (like (0,0), (1,0), (0,1), (1,1))? If so, why?
Do I need to use things like VBOs (which seem to be related to the vertex shader), FBOs, or other things like that?
Can't I just load my image into a texture and wait for the fragment shader to do everything I want on this texture?
Can someone provide some simple piece of "clean" code that could help me understand (without any fancy classes that make the understanding so complicated)?
Here is what my fragment shader looks like for simple color swapping:
precision mediump float;

uniform int width;
uniform int height;
uniform sampler2D texture;

void main() {
    /* GLSL ES 1.00 requires a default float precision and has no implicit
       int-to-float conversion, so the dimensions are converted explicitly. */
    vec2 texcoord = gl_FragCoord.xy / vec2(float(width), float(height));
    vec4 texture_value = texture2D(texture, texcoord);
    gl_FragColor = texture_value.bgra;
}
and my main.cpp:
int main(int argc, char** argv)
{
if (argc != 4) {
std::cerr << "Usage: " << argv[0] << " <vertex shader path> <fragment shader path> <image path>" << std::endl;
return EXIT_FAILURE;
}
// Get an EGL valid display
EGLDisplay display;
display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
if (display == EGL_NO_DISPLAY) {
std::cerr << "Failed to get EGL Display" << std::endl
<< "Error: " << eglGetError() << std::endl;
return EXIT_FAILURE;
}
else {
std::cerr << "Successfully get EGL Display." << std::endl;
}
// Create a connection to the display
EGLint major, minor;
if (eglInitialize(display, &major, &minor) == EGL_FALSE) {
std::cerr << "Failed to initialize EGL Display" << std::endl
<< "Error: " << eglGetError() << std::endl;
eglTerminate(display);
return EXIT_FAILURE;
}
else {
std::cerr << "Successfully intialized display (OpenGL ES version " << minor << "." << major << ")." << std::endl;
}
// EGL configs are used to specify things like multisampling, channel sizes, stencil buffer usage, and more.
// See the doc: https://www.khronos.org/registry/EGL/sdk/docs/man/html/eglChooseConfig.xhtml for more information.
EGLConfig config;
EGLint num_configs;
if (!eglChooseConfig(display, configAttribs, &config, 1, &num_configs)) {
std::cerr << "Failed to choose EGL Config" << std::endl
<< "Error: " << eglGetError() << std::endl;
eglTerminate(display);
return EXIT_FAILURE;
}
else {
std::cerr << "Successfully choose OpenGL ES Config ("<< num_configs << ")." << std::endl;
}
// Creating an OpenGL Render Surface with surface attributes defined above.
EGLSurface surface = eglCreatePbufferSurface(display, config, pbufferAttribs);
if (surface == EGL_NO_SURFACE) {
std::cerr << "Failed to create EGL Surface." << std::endl
<< "Error: " << eglGetError() << std::endl;
}
else {
std::cerr << "Successfully created OpenGL ES Surface." << std::endl;
}
eglBindAPI(EGL_OPENGL_API);
EGLContext context = eglCreateContext(display, config, EGL_NO_CONTEXT, contextAttribs);
if (context == EGL_NO_CONTEXT) {
std::cerr << "Failed to create EGL Context." << std::endl
<< "Error: " << eglGetError() << std::endl;
}
else {
std::cerr << "Successfully created OpenGL ES Context." << std::endl;
}
//Bind context to surface
eglMakeCurrent(display, surface, surface, context);
// Create viewport and check if it has been created correctly
glViewport(0, 0, WIDTH, HEIGHT);
GLint viewport[4];
glGetIntegerv(GL_VIEWPORT, viewport);
if (viewport[2] != WIDTH || viewport[3] != HEIGHT) {
std::cerr << "Failed to create the viewport. Size does not match (glViewport/glGetIntegerv not working)." << std::endl
<< "OpenGL ES might be faulty!" << std::endl
<< "If you are on Raspberry Pi, you should not updated EGL as it will install fake EGL." << std::endl;
eglTerminate(display);
return EXIT_FAILURE;
}
// Clear buffer and get ready to draw some things
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Create a shader program
GLuint program = load_shaders(std::string(argv[1]), std::string(argv[2]));
if (program == -1)
{
std::cerr << "Failed to create a shader program. See above for more details." << std::endl;
eglTerminate(display);
return EXIT_FAILURE;
}
/* Initialization of DevIL */
if (ilGetInteger(IL_VERSION_NUM) < IL_VERSION) {
std::cerr << "Failed to use DevIL: Wrong version." << std::endl;
return EXIT_FAILURE;
}
ilInit();
ILuint image = load_image(argv[3]);
GLuint texId;
glGenTextures(1, &texId); /* Texture name generation */
glBindTexture(GL_TEXTURE_2D, texId); /* Binding of texture name */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); /* We will use linear interpolation for magnification filter */
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); /* We will use linear interpolation for minifying filter */
glTexImage2D(GL_TEXTURE_2D, 0, ilGetInteger(IL_IMAGE_FORMAT), ilGetInteger(IL_IMAGE_WIDTH), ilGetInteger(IL_IMAGE_HEIGHT),
0, ilGetInteger(IL_IMAGE_FORMAT), GL_UNSIGNED_BYTE, ilGetData()); /* Texture specification; in OpenGL ES 2 the internal format must match the pixel format, so IL_IMAGE_BPP (bytes per pixel) would be invalid here */
Thanks.
Do I need a vertex shader if all I want to do is pure 2D image processing?
Using vertex and fragment shaders is mandatory in OpenGL ES 2.
If I do, what should be done in this vertex shader, as I have no vertices at all? Should I create quad vertices (like (0,0), (1,0), (0,1), (1,1))? If so, why?
Yes. Because that's how OpenGL ES 2 works. Otherwise you would need to use something like compute shaders (supported in OpenGL ES 3.1+) or OpenCL.
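For illustration, a minimal sketch of such a vertex shader (the attribute name a_position is an arbitrary choice; the fragment shader from the question can stay as-is since it derives coordinates from gl_FragCoord):

// Pass-through vertex shader for a fullscreen quad (GLSL ES 1.00).
attribute vec2 a_position; // quad corner in clip space, range [-1, 1]
void main() {
    gl_Position = vec4(a_position, 0.0, 1.0);
}

On the C++ side, four clip-space corners drawn as a triangle strip cover the whole viewport:

// Fullscreen quad, drawn with GL_TRIANGLE_STRIP (4 vertices, 2 triangles).
static const GLfloat quad[] = {
    -1.0f, -1.0f, // bottom left
     1.0f, -1.0f, // bottom right
    -1.0f,  1.0f, // top left
     1.0f,  1.0f  // top right
};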
Do I need to use things like VBOs (which seem to be related to the vertex shader), FBOs, or other things like that?
Using a VBO/IBO will make practically no difference for you, since you only have 4 vertices and 2 primitives. You may want to render to a texture, depending on your needs; a sketch of that follows below.
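If the goal is to process the image offscreen and save the result rather than display it, a hedged sketch of the render-to-texture setup could look like this (width/height stand for the image dimensions, std::vector assumes <vector> is included, error checking omitted):

// Sketch: render into a texture through an FBO, then read the result back.
GLuint outTex, fbo;
glGenTextures(1, &outTex);
glBindTexture(GL_TEXTURE_2D, outTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0,
             GL_RGBA, GL_UNSIGNED_BYTE, NULL); // empty target texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glGenFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
                       GL_TEXTURE_2D, outTex, 0);

// ... draw the fullscreen quad with the processing shader here ...

// Read the processed pixels back, e.g. to hand them to DevIL for saving.
std::vector<unsigned char> result(width * height * 4);
glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, result.data());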
Can't I just load my image into a texture and wait for the fragment shader to do everything I want on this texture?
No. The fragment shader only runs on fragments produced by a draw call, so you still have to draw something (the fullscreen quad) that covers the pixels you want processed.
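Putting it together, a rough sketch of the missing draw code, using the quad array and attribute name from the sketch above (imgWidth/imgHeight are placeholders for the loaded image's dimensions):

glUseProgram(program);

// Bind the input image to texture unit 0 and point the sampler uniform at it.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texId);
glUniform1i(glGetUniformLocation(program, "texture"), 0);
glUniform1i(glGetUniformLocation(program, "width"), imgWidth);
glUniform1i(glGetUniformLocation(program, "height"), imgHeight);

// Feed the quad corners to the vertex shader (client-side array, no VBO) and draw.
GLint posLoc = glGetAttribLocation(program, "a_position");
glEnableVertexAttribArray(posLoc);
glVertexAttribPointer(posLoc, 2, GL_FLOAT, GL_FALSE, 0, quad);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);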
I am learning OpenGL right now. I have bought a book called OpenGL Superbible, but I couldn't manage to properly configure the environment. I use GLFW 3.2 as the windowing toolkit (if that is what it is called) and GLEW 2.0.
I am trying to compile and use shaders to draw on the screen. According to the book this should draw a triangle on the screen, but it doesn't. Instead, it shows only the clear background color set by glClearColor.
This is the code:
#include <iostream>
#include <GL/glew.h> // GLEW must come before any header that pulls in gl.h
#include <GLFW/glfw3.h>
GLuint CompileShaders();
int main(void) {
// Initialise GLFW
if (!glfwInit()) {
fprintf(stderr, "Failed to initialize GLFW\n");
getchar();
return -1;
}
// Open a window and create its OpenGL context
GLFWwindow *window;
window = glfwCreateWindow(1024, 768, "Tutorial 01", NULL, NULL);
if (window == NULL) {
fprintf(stderr, "Failed to open GLFW window. If you have an Intel GPU, "
"they are not 3.3 compatible. Try the 2.1 version of the "
"tutorials.\n");
getchar();
glfwTerminate();
return -1;
}
glfwMakeContextCurrent(window);
// Initialize GLEW
if (glewInit() != GLEW_OK) {
fprintf(stderr, "Failed to initialize GLEW\n");
getchar();
glfwTerminate();
return -1;
}
// Ensure we can capture the escape key being pressed below
glfwSetInputMode(window, GLFW_STICKY_KEYS, GL_TRUE);
GLuint RenderingProgram = CompileShaders();
GLuint VertexArrayObject;
glCreateVertexArrays(1, &VertexArrayObject);
glBindVertexArray(VertexArrayObject);
int LoopCounter = 0;
do {
// Clear the screen. It's not mentioned before Tutorial 02, but it can cause
// flickering, so it's there nonetheless.
/*const GLfloat red[] = {
(float)sin(LoopCounter++ / 100.0f)*0.5f + 0.5f,
(float)cos(LoopCounter++ / 100.0f)*0.5f + 0.5f,
0.0f, 1.0f
};*/
// glClearBufferfv(GL_COLOR, 0, red);
// Draw nothing, see you in tutorial 2 !
glUseProgram(RenderingProgram);
glDrawArrays(GL_TRIANGLES, 0, 3);
// Swap buffers
glfwSwapBuffers(window);
glfwPollEvents();
} // Check if the ESC key was pressed or the window was closed
while (glfwGetKey(window, GLFW_KEY_ESCAPE) != GLFW_PRESS &&
glfwWindowShouldClose(window) == 0);
// Close OpenGL window and terminate GLFW
glfwTerminate();
return 0;
}
GLuint CompileShaders() {
GLuint VertexShader;
GLuint FragmentShader;
GLuint Program;
static const GLchar *VertexShaderSource[] = {
"#version 450 core "
" "
"\n",
" "
" \n",
"void main(void) "
" "
"\n",
"{ "
" "
" \n",
"const vec4 vertices[3] = vec4[3](vec4(0.25, -0.25, 0.5, 1.0),\n",
" "
"vec4(-0.25, -0.25, 0.5, 1.0),\n",
" "
"vec4(0.25, 0.25, 0.5, 1.0)); \n",
" gl_Position = vertices[gl_VertexID]; \n",
"} "
" "
" \n"};
static const GLchar *FragmentShaderSource[] = {
"#version 450 core "
" "
"\n",
" "
" \n",
"out vec4 color; \n",
" "
" \n",
"void main(void) "
" "
"\n",
"{ "
" "
" \n",
" color = vec4(0.0, 0.8, 1.0, 1.0); \n",
"} "
" "
" \n"};
// Create and compile vertex shader.
VertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(VertexShader, 1, VertexShaderSource, NULL);
glCompileShader(VertexShader);
// Create and compile fragment shader.
FragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(FragmentShader, 1, FragmentShaderSource, NULL);
glCompileShader(FragmentShader);
// Create program, attach shaders to it, and link it
Program = glCreateProgram();
glAttachShader(Program, VertexShader);
glAttachShader(Program, FragmentShader);
glLinkProgram(Program);
// Delete the shaders as the program has them now.
glDeleteShader(FragmentShader);
glDeleteShader(VertexShader);
return Program;
}
I am working in Visual Studio 2015. I have all the libraries to develop some OpenGL (I think), but something is wrong. Please help me. By the way, I know the glCreateVertexArrays() function is only in OpenGL 4.5 and above, since the book is based on OpenGL 4.5.
I will go crazy soon because there are no proper tutorials for beginners. People who have learned this are very ambitious people. I bow before those people.
Your shaders shouldn't compile:
glShaderSource(VertexShader, 1, VertexShaderSource, NULL);
This tells the GL that it should expect an array of 1 GLchar pointer. However, your GLSL code is actually split into several individual strings (note the commas):
static const GLchar *VertexShaderSource[] = {
"...GLSL-code..."
"...GLSL-code..."
"...GLSL-code...", // <- this comma ends the first string vertexShaderSource[0]
"...GLSL-code..." // vertexShaderSource[1] starts here
[...]
There are two possible solutions:
Just remove those commas, so that your array consists of just one element pointing to the whole GLSL source as a single string.
Tell the GL the truth about your data:
glShaderSource(..., sizeof(vertexShaderSource)/sizeof(vertexShaderSource[0]), vertexShaderSource, ...)
Apart from that, you should always query the compile and link status of your shader and program objects, as well as the shader compile and program link info logs. They will contain human-readable messages telling you why the compilation or link failed.
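A minimal sketch of such a check for the vertex shader (the program link check is analogous, using glGetProgramiv and glGetProgramInfoLog; assumes <vector> is included):

// Query the compile status and print the info log on failure.
GLint status = GL_FALSE;
glGetShaderiv(VertexShader, GL_COMPILE_STATUS, &status);
if (status != GL_TRUE) {
    GLint logLength = 0;
    glGetShaderiv(VertexShader, GL_INFO_LOG_LENGTH, &logLength);
    std::vector<GLchar> log(logLength + 1);
    glGetShaderInfoLog(VertexShader, logLength, NULL, log.data());
    fprintf(stderr, "Vertex shader compilation failed:\n%s\n", log.data());
}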
As part of a 3D mesh viewer I am making in Qt with QOpenGLWidget, I need to provide the ability for a user to click a node within the model. To restrict selection to visible nodes only, I have tried to include glReadPixels(GL_DEPTH_COMPONENT) in my selection algorithm.
My problem is that glReadPixels(depth) always returns 0. All the error outputs in the code below return 0 as well. glReadPixels(red) returns correct values:
GLenum err = GL_NO_ERROR;
QTextStream(stdout) << "error before reading gl_red = " << err << endl;
GLfloat winX, winY, myred, mydepth;
winX = mousex;
winY = this->height() - mousey;
glReadPixels(winX,winY,1,1,GL_RED,GL_FLOAT, &myred);
QTextStream(stdout) << "GL RED = " << myred << endl;
err = glGetError();
QTextStream(stdout) << "error after reading gl_red = " << err << endl;
glReadPixels(winX,winY,1,1,GL_DEPTH_COMPONENT,GL_FLOAT, &mydepth);
QTextStream(stdout) << "GL_DEPTH_COMPONENT = " << mydepth << endl;
err = glGetError();
QTextStream(stdout) << "error after reading gl_depth = " << err << endl;
My normal 3D rendering works fine, and I have glEnable(GL_DEPTH_TEST) in my initializeGL() function. At the moment I'm not using any fancy VBOs or VAOs. FaceMeshQualityColor and triVertices both have the datatype QVector<QVector3D>. My current face rendering follows this progression:
shader = shaderVaryingColor;
shader->bind();
shader->setAttributeArray("color", FaceMeshQualityColor.constData());
shader->enableAttributeArray("color");
shader->setUniformValue("mvpMatrix", pMatrix * vMatrix * mMatrix);
shader->setAttributeArray("vertex", triVertices.constData());
shader->enableAttributeArray("vertex");
glEnable(GL_POLYGON_OFFSET_FILL);
glPolygonOffset(1,1);
glDrawArrays(GL_TRIANGLES, 0, triVertices.size());
glDisable(GL_POLYGON_OFFSET_FILL);
shader->disableAttributeArray("vertex");
shader->disableAttributeArray("color");
shader->release();
In my main file I explicitly set my OpenGL version to something with glReadPixels(GL_DEPTH_COMPONENT) functionality (as opposed to OpenGL ES 2.0):
int main(int argc, char *argv[])
{
QApplication a(argc, argv);
QSurfaceFormat format;
format.setVersion(2, 1);
format.setProfile(QSurfaceFormat::CoreProfile);
format.setDepthBufferSize(32);
QSurfaceFormat::setDefaultFormat(format);
MainWindow w;
w.showMaximized();
return a.exec();
}
Is my problem of glReadPixels(depth) not working somehow related to my treatment of the depth buffer?
Do I need to 'activate' the depth buffer to be able to read from it before I call glReadPixels? Or do I need to have my vertex shader explicitly write the depth location to some other object?
QOpenGLWidget renders into an underlying FBO, and you can't simply read the depth component from that FBO if multisampling is enabled. The easiest solution is to set the sample count to zero, so your code will look like this:
QSurfaceFormat format;
format.setVersion(2, 1);
format.setProfile(QSurfaceFormat::CoreProfile);
format.setDepthBufferSize(32);
format.setSamples(0);
QSurfaceFormat::setDefaultFormat(format);
Or you can keep multisampling, but then an additional non-multisampled FBO is required into which the depth buffer can be copied.
class MyGLWidget : public QOpenGLWidget, protected QOpenGLFunctions
{
//
// OTHER WIDGET RELATED STUFF
//
QOpenGLFramebufferObject* mFBO = nullptr;
void paintGL() override
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
//
// DRAW YOUR SCENE HERE!
//
QOpenGLContext *ctx = QOpenGLContext::currentContext();
// FBO must be re-created! is there a way to reset it?
if(mFBO) delete mFBO;
QOpenGLFramebufferObjectFormat format;
format.setSamples(0);
format.setAttachment(QOpenGLFramebufferObject::CombinedDepthStencil);
mFBO = new QOpenGLFramebufferObject(size(), format);
glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->defaultFramebufferObject());
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, mFBO->handle());
ctx->extraFunctions()->glBlitFramebuffer(0, 0, width(), height(), 0, 0, mFBO->width(), mFBO->height(), GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT, GL_NEAREST);
mFBO->bind(); // must rebind, otherwise it won't work!
float mouseDepth = 1.f;
glReadPixels(mouseX, mouseY, 1, 1, GL_DEPTH_COMPONENT, GL_FLOAT, &mouseDepth);
mFBO->release();
}
};
I have this issue with my loader:
int loadTexture(char *file)
{
// Load the image
SDL_Surface *tex = IMG_Load(file);
GLuint t;
cout << "Loading image: " << string(file) << "\n";
if (tex) {
glGenTextures(1, &t); // Generating 1 texture
glBindTexture(GL_TEXTURE_2D, t); // Bind the texture
glTexImage2D(GL_TEXTURE_2D, 0, 3, tex->w, tex->h, 0, GL_RGB, GL_UNSIGNED_BYTE, tex->pixels); // Map texture
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); // Set minifying parameter to linear
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); // Set magnifying parameter to linear
SDL_FreeSurface(tex); // Free the surface from memory
cout << " > Image loaded: " << string(file) << "\n\n";
return t; // return the texture index
}
else {
cout << " > Failed to load image: " << IMG_GetError() << "\n\n";
SDL_FreeSurface(tex); // Free the surface from memory
return -1; // return -1 in case the image failed to load
}
}
It loads the images just fine but only the last image loaded is used when drawing my objects:
textTest = loadTexture("assets/test_texture_64.png");
textTest2 = loadTexture("assets/test_texture2_64.png");
textTest3 = loadTexture("assets/test_texture3_64.png");
textTest4 = loadTexture("assets/test_texture4_64.png");
Texture files:
http://i.imgur.com/2K9NsZF.png
The program running:
http://i.imgur.com/5FMrA1b.png
Before drawing an object I use glBindTexture(GL_TEXTURE_2D, t), where t is the name of the texture I want to use. I'm new to OpenGL and C++, so I'm having trouble understanding the issue here.
You should check whether loadTexture returns different texture IDs when you load the textures. Then you need to make sure that you bind the right texture onto each object using glBindTexture(...), which you say you are doing already.
How are you drawing your objects right now? Is multitexturing involved? Make sure you have matching glPushMatrix / glPopMatrix calls before and after drawing each object.
Your loader looks correct to me, although you do not glEnable and glDisable GL_TEXTURE_2D; that should not matter, though.
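For reference, a minimal sketch of the intended per-object draw order, using the texture names from the question (drawFirstObject/drawSecondObject are placeholders for your own drawing code):

// Each object must bind its own texture before its draw calls.
glBindTexture(GL_TEXTURE_2D, textTest);  // first object gets the first texture
drawFirstObject();
glBindTexture(GL_TEXTURE_2D, textTest2); // second object gets the second texture
drawSecondObject();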
I'm writing an application that renders every frame in interleaved stereoscopic 3D. To make this happen, I am writing two fragment shaders: one to render the odd rows of pixels of the left eye's frame, and one to render the even rows of pixels of the right eye's frame.
I was using OSX's built-in OpenGL Shader Builder application, and I was able to successfully render every odd row as green:
As you can see, the frag code I'm using looks like this:
void main(){
    if ( mod(gl_FragCoord.y - 0.5, 2.0) == 1.0){
        gl_FragColor = vec4(0.0, 1.0, 0.0, 1.0); // gl_FragColor; gl_FragCoord is read-only
    }
}
However, I wrote a small OpenGL application to test this shader (Btw, this is NVIDIA OpenGL 2.1, OSX 10.6.8):
#include <iostream>
#include <stdio.h>
#ifdef __APPLE__
#include <OpenGL/gl.h>
#include <OpenGL/glu.h>
#include <GLUT/glut.h>
#endif
void DrawGLScene(){
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
glFlush();
glutSwapBuffers();
}
void Shading(){
//Fragment shader we want to use
GLuint oddRowShaderId = glCreateShader(GL_FRAGMENT_SHADER);
std::cout << "Creating the fragment shader with id " << oddRowShaderId << std::endl;
const GLchar *source[] =
{ "void main(){ \n",
" if (mod(gl_FragCoord.y-0.5, 2.0) == 0.0){\n",
" gl_FragColor = vec4( 0.0, 1.0, 0.0, 1.0 );\n",
//" gl_BackColor = vec4( 0.0, 1.0, 0.0, 1.0 );\n"
" }\n",
"}\n"
};
std::cout << "Shader source:\n" << source[0] << source[1] << source[2] << source[3] < < source[4] << std::endl;
std::cout << "Gathering shader source code" << std::endl;
glShaderSource(oddRowShaderId, 1, source, 0);
std::cout << "Compiling the shader" << std::endl;
glCompileShader(oddRowShaderId);
std::cout << "Creating new glCreateProgram() program" << std::endl;
GLuint shaderProgramId = glCreateProgram(); //Shader program id
std::cout << "Attaching shader to the new program" << std::endl;
glAttachShader(shaderProgramId, oddRowShaderId); //Add the fragment shader to the program
std::cout << "Linking the program " << std::endl;
glLinkProgram(shaderProgramId); //Link the program
std::cout << "Using the shader program for rendering" << std::endl;
glUseProgram(shaderProgramId); //Start using the shader
}
void keyboard(int key, int x, int y){
switch(key){
case 's':
Shading();
break;
case 'q':
exit(0);
break;
}
}
void idleFunc(){
glutPostRedisplay(); //Redraw the scene.
}
int main(int argc, char** argv){
glutInit(&argc, argv);
glutInitWindowPosition(100, 100);
glutInitWindowSize(1000,1000);
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH);
glutCreateWindow("anaglyph test");
glutDisplayFunc(DrawGLScene);
glutSpecialFunc(keyboard);
glutIdleFunc(idleFunc);
glutMainLoop();
}
This is the output I get from running the code:
I have a feeling I may not be compiling the fragment shader and calling glUseProgram correctly.
You can save some performance, gain some compatibility, and spare yourself some headache by using a 2-pixel texture mask (even pixels black, odd pixels white) and tiling it on top of full-screen quads (you will have 2 of them: one per eye). Use an inverted mask, or put a coordinate offset on the second quad. In general it will look like this:
(perspective and huge gaps added for visualization)
Put the scene texture (or whatever you have) on the quad and use the mask color to discard fragments or to set transparency. To avoid using 2 g-buffers (GPU memory), you can render the full scene for one eye into a texture and the other directly into the view, then render a full-screen quad on top of it with the first scene's texture and the transparency mask.
Or render them separately if you need two outputs.
The main point is that you should use masking instead of logic operations based on texture coordinates.
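A rough sketch of the masking idea in a fragment shader (uniform and varying names are illustrative; the mask is assumed to be tiled over the screen via GL_REPEAT wrapping):

// Keep only this eye's rows: discard fragments where the mask is black.
uniform sampler2D scene; // the rendered eye image
uniform sampler2D mask;  // tiled 2-pixel mask: black row / white row
varying vec2 texcoord;
void main() {
    if (texture2D(mask, texcoord).r < 0.5)
        discard;
    gl_FragColor = texture2D(scene, texcoord);
}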
For an algorithm of mine I need to be able to access the depth buffer. I have no problem at all doing this using glReadPixels, but reading an 800x600 window is extremely slow (from 300 fps down to 20 fps).
I'm reading a lot about this and I think dumping the depth buffer to a texture would be faster. I know how to create a texture, but how do I get the depth out?
Creating an FBO and creating the texture from there might be even faster; at the moment I am using an FBO (but still in combination with glReadPixels).
So what is the fastest way to do this?
(I'm probably not able to use GLSL because I don't know anything about it and I don't have much time left to learn, deadlines!)
edit:
Would a PBO work? As described here: http://www.songho.ca/opengl/gl_pbo.html, it can go a lot faster, but I cannot change buffers all the time as in the example.
Edit2:
How would I go about putting the depth data in the PBO? At the moment I do:
glGenBuffersARB(1, &pboId);
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pboId);
glBufferDataARB(GL_PIXEL_PACK_BUFFER_ARB, 800*600*sizeof(GLfloat),0, GL_STREAM_READ_ARB);
and right before my glReadPixels call I bind the buffer again. The effect is that I read nothing at all. If I disable the PBOs, it all works.
Final edit:
I guess I solved it; I had to use:
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pboId);
glReadPixels( 0, 0,Engine::fWidth, Engine::fHeight, GL_DEPTH_COMPONENT,GL_FLOAT, BUFFER_OFFSET(0));
GLfloat *pixels = (GLfloat*)glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY); /* GLfloat, to match the GL_FLOAT readback */
This gave me a 20 FPS increase. It's not that much, but it's something.
So, I used 2 PBOs, but I'm still encountering a problem: my code only gets executed once.
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pboIds[index]);
std::cout << "Reading pixels" << std::endl;
glReadPixels( 0, 0,Engine::fWidth, Engine::fHeight, GL_DEPTH_COMPONENT,GL_FLOAT, BUFFER_OFFSET(0));
std::cout << "Getting pixels" << std::endl;
// glBufferDataARB(GL_PIXEL_PACK_BUFFER_ARB, 800*600*sizeof(GLfloat), 0, GL_STREAM_DRAW_ARB);
GLfloat *pixels = (GLfloat*)glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY);
int count = 0;
for(int i = 0; i != 800*600; ++i){
std::cout << pixels[i] << std::endl;
}
The last line executes once, but only once; after that, the method keeps being called (which is normal) but stops at the access to pixels.
I apparently forgot to call glUnmapBufferARB; that kinda solved it, though my framerate is lower again.
I decided to give FBOs a go, but I stumbled across a problem:
Initialising FBO:
glGenFramebuffersEXT(1, framebuffers);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebuffers[0]);
std::cout << "framebuffer generated, id: " << framebuffers[0] << std::endl;
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
glGenRenderbuffersEXT(1,renderbuffers);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, renderbuffers[0]);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, 800, 600);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, renderbuffers[0]);
bool status = checkFramebufferStatus();
if(!status)
std::cout << "Could not initialise FBO" << std::endl;
else
std::cout << "FBO ready!" << std::endl;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
My drawing loop:
GLenum errCode;
const GLubyte *errString;
if ((errCode = glGetError()) != GL_NO_ERROR) {
errString = gluErrorString(errCode);
fprintf (stderr, "OpenGL Error: %s\n", errString);
}
++frameCount;
// ----------- First pass to fill the depth buffer -------------------
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, framebuffers[0]);
std::cout << "FBO bound" << std::endl;
//Enable depth testing
glEnable(GL_DEPTH_TEST);
glDisable(GL_STENCIL_TEST);
glDepthMask( GL_TRUE );
//Disable stencil test, we don't need that for this pass
glClearStencil(0);
glEnable(GL_STENCIL_TEST);
//Disable drawing to the color buffer
glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE);
//We clear all buffers and reset the modelview matrix
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glLoadIdentity();
//We set our viewpoint
gluLookAt(eyePoint[0],eyePoint[1], eyePoint[2], 0.0,0.0,0.0,0.0,1.0,0.0);
//std::cout << angle << std::endl;
std::cout << "Writing to FBO depth" << std::endl;
//Draw the VBO's, this does not draw anything to the screen, we are just filling the depth buffer
glDrawElements(GL_TRIANGLES, 120, GL_UNSIGNED_SHORT, BUFFER_OFFSET(0));
After this I call a function that calls glReadPixels()
The function does not even get called. The loop restarts at the function call.
Apparently I solved this as well: I had to use
glReadPixels(0, 0, Engine::fWidth, Engine::fHeight, GL_DEPTH_COMPONENT, GL_UNSIGNED_SHORT, pixels);
with GL_UNSIGNED_SHORT instead of GL_FLOAT (or any other format, for that matter).
The fastest way of doing this is to use asynchronous pixel buffer objects; there's a good explanation here:
http://www.songho.ca/opengl/gl_pbo.html
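A minimal sketch of the alternating-PBO pattern described there, assuming the question's 800x600 window, two already-created PBOs in pboIds, and its BUFFER_OFFSET macro (error handling omitted): while the GPU copies the current frame's depth into one PBO, the CPU maps the other and reads the previous frame's data, hiding the transfer latency.

// Alternate between two PBOs: read into one, map the other.
static int frame = 0;
int writeIdx = frame % 2;       // PBO receiving this frame's depth
int readIdx = (frame + 1) % 2;  // PBO holding last frame's depth

glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pboIds[writeIdx]);
glReadPixels(0, 0, 800, 600, GL_DEPTH_COMPONENT, GL_FLOAT, BUFFER_OFFSET(0)); // asynchronous copy

glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pboIds[readIdx]);
GLfloat* depth = (GLfloat*)glMapBufferARB(GL_PIXEL_PACK_BUFFER_ARB, GL_READ_ONLY_ARB);
if (depth) {
    // ... process last frame's depth values here ...
    glUnmapBufferARB(GL_PIXEL_PACK_BUFFER_ARB); // must unmap before the PBO is reused
}
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 0);
++frame;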
I would render to an FBO and read its depth buffer after the frame has been rendered. PBOs are outdated technology.
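A hedged sketch of that approach, attaching a depth texture to an FBO so the depth values stay on the GPU (using the same EXT entry points as the question's code; names are illustrative):

// Depth-only FBO with a depth texture attachment.
GLuint depthTex, fbo;
glGenTextures(1, &depthTex);
glBindTexture(GL_TEXTURE_2D, depthTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, 800, 600, 0,
             GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
                          GL_TEXTURE_2D, depthTex, 0);
glDrawBuffer(GL_NONE); // depth-only rendering: no color attachment
glReadBuffer(GL_NONE);
// After rendering the scene into this FBO, depthTex holds the depth
// values and can be sampled by a shader without a CPU round trip.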