Depth testing doesn't work when using custom framebuffer - c++

I'm studying framebuffers and I've made a mirror in my scene. It works fine except for the depth testing; I got stuck trying to make it work. (When rendering to the default framebuffer, depth testing works fine.) I would appreciate any help. Here is the code:
glEnable( GL_DEPTH_TEST );
glViewport( 0, 0, 512, 512 );
unsigned int fbo;
glGenFramebuffers( 1, &fbo );
glBindFramebuffer( GL_FRAMEBUFFER, fbo );
unsigned int rbo;
glGenRenderbuffers( 1, &rbo );
glBindRenderbuffer( GL_RENDERBUFFER, rbo );
glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH, 512, 512 );
glBindRenderbuffer( GL_RENDERBUFFER, 0 );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, rbo ); // if I remove this, the mirror works, but without depth testing
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
this->mirror->texturePack[0]->textureId(), 0 );
//render scene from mirror camera
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
glViewport( 0, 0, this->width(), this->height() );
//render scene from main camera

Your framebuffer is incomplete, because GL_DEPTH is not a valid internal format for renderbuffer storage.
See glRenderbufferStorage. Try GL_DEPTH_COMPONENT16, GL_DEPTH_COMPONENT24 or GL_DEPTH_COMPONENT32:
glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 512, 512 );
See OpenGL 4.6 core profile specification, 9.4 Framebuffer Completeness, page 323:
The internal formats of the attached images can affect the completeness of
the framebuffer, so it is useful to first define the relationship between the internal
format of an image and the attachment points to which it can be attached.
• An internal format is depth-renderable if it is DEPTH_COMPONENT or one of the formats from table 8.13 whose base internal format is DEPTH_COMPONENT or DEPTH_STENCIL. No other formats are depth-renderable.
Note, the framebuffer completeness can be checked by:
glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE
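For reference, a minimal sketch of the corrected depth-renderbuffer setup with a completeness check, reusing the names and 512x512 size from the question (the error print is only illustrative):
glBindFramebuffer( GL_FRAMEBUFFER, fbo );
glBindRenderbuffer( GL_RENDERBUFFER, rbo );
glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, 512, 512 ); // sized depth format, not GL_DEPTH
glBindRenderbuffer( GL_RENDERBUFFER, 0 );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, rbo );
GLenum status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
if ( status != GL_FRAMEBUFFER_COMPLETE )
    fprintf( stderr, "FBO incomplete: 0x%04X\n", status ); // requires <cstdio>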

I've finally solved it. I added the glClear( GL_DEPTH_BUFFER_BIT ) command right after binding the mirror framebuffer, and after that it worked.
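For the record, the per-frame mirror pass then looks roughly like this (a sketch using the names and sizes from the snippet above; clearing the color buffer as well is typical):
glBindFramebuffer( GL_FRAMEBUFFER, fbo );
glViewport( 0, 0, 512, 512 );
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ); // the FBO has its own depth buffer, so clear it every frame
//render scene from mirror camera
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
glViewport( 0, 0, this->width(), this->height() );
//render scene from main camera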

Related

deferred rendering - Renderbuffer vs Texture

So, I've been reading about this, and I still haven't found a conclusion. Some examples use textures as their render targets, some people use renderbuffers, and some use both!
For example, using just textures:
// Create the gbuffer textures
glGenTextures(ARRAY_SIZE_IN_ELEMENTS(m_textures), m_textures);
glGenTextures(1, &m_depthTexture);
for (unsigned int i = 0 ; i < ARRAY_SIZE_IN_ELEMENTS(m_textures) ; i++) {
    glBindTexture(GL_TEXTURE_2D, m_textures[i]);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, WindowWidth, WindowHeight, 0, GL_RGB, GL_FLOAT, NULL);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i, GL_TEXTURE_2D, m_textures[i], 0);
}
And an example using both:
glGenRenderbuffersEXT ( 1, &m_diffuseRT );
glBindRenderbufferEXT ( GL_RENDERBUFFER_EXT, m_diffuseRT );
glRenderbufferStorageEXT ( GL_RENDERBUFFER_EXT, GL_RGBA, m_width, m_height );
glFramebufferRenderbufferEXT ( GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_RENDERBUFFER_EXT, m_diffuseRT );
glGenTextures ( 1, &m_diffuseTexture );
glBindTexture ( GL_TEXTURE_2D, m_diffuseTexture );
glTexImage2D ( GL_TEXTURE_2D, 0, GL_RGBA, m_width, m_height, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL );
glTexParameteri ( GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameteri ( GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameteri ( GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameteri ( GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
// Attach the texture to the FBO
glFramebufferTexture2DEXT ( GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, m_diffuseTexture, 0 );
What's the difference? What's the point of creating a texture, a render buffer, and then assigning one to the other? After you've successfully supplied a texture with an image, it's got its memory allocated, so why does one need to bind it to a render buffer?
Why would one use textures or renderbuffers? What would be the advantages?
I've read that you cannot read from a renderbuffer, only from a texture. What's the use of it, then?
EDIT:
So, my current code for a GBuffer is this:
enum class GBufferTextureType
{
    Depth = 0,
    Position,
    Diffuse,
    Normal,
    TexCoord
};
.
.
.
glGenFramebuffers ( 1, &OpenGLID );
if ( Graphics::GraphicsBackend->CheckError() == false )
{
    Delete();
    return false;
}
glBindFramebuffer ( GL_FRAMEBUFFER, OpenGLID );
if ( Graphics::GraphicsBackend->CheckError() == false )
{
    Delete();
    return false;
}
uint32_t TextureGLIDs[5];
glGenTextures ( 5, TextureGLIDs );
if ( Graphics::GraphicsBackend->CheckError() == false )
{
    Delete();
    return false;
}
// Create the depth texture
glBindTexture ( GL_TEXTURE_2D, TextureGLIDs[ ( int ) GBufferTextureType::Depth] );
glTexImage2D ( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, In_Dimensions.x, In_Dimensions.y, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL );
glFramebufferTexture2D ( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, TextureGLIDs[ ( int ) GBufferTextureType::Depth], 0 );
// Create the color textures
for ( unsigned cont = 1; cont < 5; ++cont )
{
    glBindTexture ( GL_TEXTURE_2D, TextureGLIDs[cont] );
    glTexImage2D ( GL_TEXTURE_2D, 0, GL_RGB32F, In_Dimensions.x, In_Dimensions.y, 0, GL_RGB, GL_FLOAT, NULL );
    glFramebufferTexture2D ( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + cont, GL_TEXTURE_2D, TextureGLIDs[cont], 0 );
}
// Specify draw buffers
GLenum DrawBuffers[4];
for ( unsigned cont = 0; cont < 4; ++cont )
    DrawBuffers[cont] = GL_COLOR_ATTACHMENT0 + cont;
glDrawBuffers ( 4, DrawBuffers );
if ( Graphics::GraphicsBackend->CheckError() == false )
{
    Delete();
    return false;
}
GLenum Status = glCheckFramebufferStatus ( GL_FRAMEBUFFER );
if ( Status != GL_FRAMEBUFFER_COMPLETE )
{
    Delete();
    return false;
}
Dimensions = In_Dimensions;
// Unbind
glBindFramebuffer ( GL_FRAMEBUFFER, 0 );
Is this the way to go?
I still have to write the corresponding shaders...
What's the point of creating a texture, a render buffer, and then assigning one to the other?
That's not what's happening. But that's OK, because that second example code is errant nonsense. The glFramebufferTexture2DEXT call is overriding the binding from glFramebufferRenderbufferEXT; the renderbuffer is never actually used after it is created.
If you found that code online somewhere, I strongly advise you to disregard anything that source told you about OpenGL development. Though I would advise that anyway, since it's using the "EXT" extension functions in 2016, almost a decade since core FBOs became available.
I've read that you cannot read from a renderbuffer, only from a texture. What's the use of it, then?
That is entirely the point of them: you use a renderbuffer for images that you don't want to read from. That's not useful for deferred rendering, since you really do want to read from them.
But imagine if you're generating a reflection image of a scene, which you will later use as a texture in your main scene. Well, to render the reflection scene, you need a depth buffer. But you're not going to read from that depth buffer (not as a texture, at any rate); you need a depth buffer for depth testing. But the only image you're going to read from after is the color image.
So you would make the depth buffer a renderbuffer. That tells the implementation that the image can be put into whatever storage is most efficient for use as a depth buffer, without having to worry about read-back performance. This may or may not have a performance impact. But at the very least, it won't be any slower than using a texture.
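To make that concrete, here is a minimal sketch of such a reflection FBO (hypothetical names; it assumes an already-created color texture reflectionTex and width/height variables for the reflection image):
GLuint reflectionFbo, reflectionDepthRbo;
glGenFramebuffers( 1, &reflectionFbo );
glBindFramebuffer( GL_FRAMEBUFFER, reflectionFbo );
// The color image is a texture, because the main pass will sample it later.
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, reflectionTex, 0 );
// The depth image is a renderbuffer, because it is only ever used for depth testing.
glGenRenderbuffers( 1, &reflectionDepthRbo );
glBindRenderbuffer( GL_RENDERBUFFER, reflectionDepthRbo );
glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT24, width, height );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, reflectionDepthRbo );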
Most rendering scenarios need a depth and/or stencil buffer, though it is rare that you would ever need to sample the data stored in the stencil buffer from a shader.
It would be impossible to do depth/stencil tests if your framebuffer did not have a location to store these data and any render pass that uses these fragment tests requires a framebuffer with the appropriate images attached.
If you are not going to use the depth/stencil buffer data in a shader, a renderbuffer will happily satisfy storage requirements for fixed-function fragment tests. Renderbuffers have fewer format restrictions than textures do, particularly if we detour this discussion to multisampling.
D3D10 introduced support for multisampled color textures but omitted multisampled depth textures; D3D10.1 later fixed that problem and GL3.0 was finalized after D3D10's initial design oversight was corrected.
Pre-GL3 / D3D10.1 design would manifest itself in GL as a multisampled framebuffer object that allows either texture or renderbuffer color attachments but forces you to use a renderbuffer for the depth attachment.
Renderbuffers are ultimately the lowest common denominator for storage; they will get you through tough jams on feature-limited hardware. You can actually blit the data stored in a renderbuffer into a texture in some situations where you could not draw directly into the texture.
To that end, you can resolve a multisampled renderbuffer into a single-sampled texture by blitting from one framebuffer to another. This is implicit multisampling, and it (would) allow you to use the anti-aliased results of a previous render pass with a standard texture lookup. Unfortunately it is thoroughly useless for anti-aliasing in deferred shading--you need explicit multisample resolve for that.
Nonetheless, it is incorrect to say that a renderbuffer is not readable; it is, in every sense of the word, but since your goal is deferred shading, reading it would require additional GL commands to copy the data into a texture.
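As a sketch of that copy/resolve path (hypothetical names: msaaFbo is a framebuffer with a multisampled renderbuffer color attachment, resolveFbo has a single-sampled texture color attachment of the same size):
glBindFramebuffer( GL_READ_FRAMEBUFFER, msaaFbo );
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, resolveFbo );
// Blitting from a multisampled to a single-sampled framebuffer resolves the samples;
// afterwards the texture attached to resolveFbo can be sampled like any other texture.
glBlitFramebuffer( 0, 0, width, height, 0, 0, width, height, GL_COLOR_BUFFER_BIT, GL_NEAREST );
glBindFramebuffer( GL_FRAMEBUFFER, 0 );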

Precision of glReadPixels when reading unsigned int

I am having problems storing and retrieving 32-bit unsigned integers from my framebuffer. The max value for a framebuffer on my Intel laptop (NVIDIA card and Ubuntu) is:
4.294.967.295
However, the max value that I can read, for some reason, is only:
1.040.992.698
I was wondering if someone can tell me if I am doing something wrong or if this is a limitation of my graphics card.
I am setting up my framebuffer like this:
// generate render and frame buffer objects
glGenRenderbuffers( 1, &colorbufId );
glGenRenderbuffers( 1, &depthbufId );
glGenFramebuffers ( 1, &framebufId );
// setup renderbuffer
glBindRenderbuffer(GL_RENDERBUFFER, colorbufId);
glRenderbufferStorage(GL_RENDERBUFFER, GL_R32UI, _viewWidth, _viewHeight);
// setup depth buffer
glBindRenderbuffer(GL_RENDERBUFFER, depthbufId);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT32, _viewWidth, _viewHeight);
// setup framebuffer
glBindFramebuffer( GL_FRAMEBUFFER, framebufId );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, colorbufId );
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthbufId );
// check if everything went well
GLenum stat = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(stat != GL_FRAMEBUFFER_COMPLETE) { printf("stat=%d != %d\n", stat, GL_FRAMEBUFFER_COMPLETE); Error(); exit(0); }
// define where the framebuffer outputs will be written
const GLenum bufs[] = { GL_NONE, GL_COLOR_ATTACHMENT0 };
glDrawBuffers(2, bufs);
Now when I try to read a pixel:
const uint32_t MAX_UINT = std::numeric_limits< uint32_t >::max();
glClearBufferuiv(GL_RENDERBUFFER, colorbufId, &MAX_UINT);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT );
uint32_t pix_value=0;
glReadPixels(0, 0, 1, 1, GL_RED_INTEGER, GL_UNSIGNED_INT, &pix_value);
the value of pix_value is 1.040.992.698.
I have also tried using 'GL_RED' instead of 'GL_RED_INTEGER', but that still does not work.
(for anyone interested, the reason i'm trying to do the above is for picking objects)
EDIT: So in the end this had nothing to do with precision. I was not clearing the buffer correctly. See the answer below.
OK, I found the error.
This is the correct command to clear the buffer:
glClearBufferuiv(GL_COLOR, colorbufId, &UNDEF_IDX);
glClear( /*GL_COLOR_BUFFER_BIT |*/ GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT );
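Two details worth spelling out about glClearBufferuiv: the first argument is the buffer kind (GL_COLOR, not GL_RENDERBUFFER or an attachment name), and the second argument is the index into the current glDrawBuffers list rather than an object id. With bufs = { GL_NONE, GL_COLOR_ATTACHMENT0 } as in the question, the color attachment is draw buffer 1, so a sketch of the clear (using a hypothetical clearValue variable) would be:
const GLuint clearValue = std::numeric_limits<uint32_t>::max(); // requires <limits> and <cstdint>
glClearBufferuiv( GL_COLOR, 1, &clearValue );            // clears the GL_R32UI color buffer
glClear( GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT );  // depth/stencil can still go through glClear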

Render to FBO + glReadPixels all black

I am trying to render a simple checkerboard into an FBO and then do a glReadPixels().
When I do it without the FBO, everything works fine, so I assume that my render function is OK and so is the glReadPixels(). With the FBO, all I get are the lines that I draw after the calls to the FBO have been done.
Here is my code (Python, aiming cross platform):
def renderFBO():
    #WhyYouNoWorking(GL_FRAMEBUFFER)  # debug function... error checking
    glBindFramebuffer( GL_DRAW_FRAMEBUFFER, framebuffer)
    glBindRenderbuffer( GL_RENDERBUFFER, renderbufferA)
    glRenderbufferStorage( GL_RENDERBUFFER, GL_RGBA, window.width, window.height)
    glBindRenderbuffer( GL_RENDERBUFFER, renderbufferB)
    glRenderbufferStorage( GL_RENDERBUFFER, GL_DEPTH_COMPONENT, window.width, window.height)
    glBindFramebuffer( GL_DRAW_FRAMEBUFFER, framebuffer)
    glFramebufferRenderbuffer( GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbufferA)
    glFramebufferRenderbuffer( GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, renderbufferB)
    #WhyYouNoWorking(GL_FRAMEBUFFER)
    glDrawBuffer(GL_COLOR_ATTACHMENT0)
    glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
    glViewport( 0, 0, window.width, window.height)
    DrawChecker(Nbr = 16, Dark = 25.0/255, Light = 75.0/255)
    for i in range(len(labelSysInfo)):
        pyglet.text.Label(labelSysInfo[i], font_name='Times New Roman', font_size=26, x=(window.width*0.68), y= (window.height*0.04*i)+(window.height*2/3), anchor_x='left', anchor_y='center', color = (250, 250, 250, 150)).draw()
    glReadPixels(0, 0, window.width, window.height, GL_RGBA, GL_UNSIGNED_BYTE, a)
    glBindFramebuffer( GL_FRAMEBUFFER, 0)
My other function:
def on_draw(dt):
    glDrawBuffer(GL_BACK)
    glClear(GL_COLOR_BUFFER_BIT)
    glClearColor( 0.0, 0.0, 0.0, 1.0)
    glLoadIdentity()
    glEnable(GL_TEXTURE_2D)
    glDisable(GL_TEXTURE_2D)
    BlueLine()       # draw a simple line. works fine
    DropFrameTest()  # draw a simple line. works fine
In the main, the call to renderFBO() is done once, and then on_draw is called periodically.
dt = pyglet.clock.tick()
renderFBO()
pyglet.clock.schedule_interval(on_draw, 0.007)
pyglet.app.run()
At a guess, you've bound the framebuffer to the GL_DRAW_FRAMEBUFFER only. Use
glBindFramebuffer(GL_FRAMEBUFFER, ...
and
glFramebufferRenderbuffer(GL_FRAMEBUFFER, ...
to both read and write with the same FBO.
I'm sure you already have, but checking for framebuffer completeness (glCheckFramebufferStatus) and for GL errors (glGetError, or the newer debug-output extension) is also very useful.
[EDIT]
(Shotgun problem-solving tactics from the comments)
If you see an image on the first frame but none on the next, there must be something staying behind from the previous frame.
The most common problem is forgetting to clear the depth buffer - but you haven't.
Next up are stencil buffers and blending (neither looks like it's enabled to begin with).
Maybe a new FBO handle is being generated each frame and you're running out?
Another common problem is accumulating matrix transforms, but you have glLoadIdentity so should be no issue there.

glReadPixels() sets GL_INVALID_OPERATION error

I'm trying to implement color picking with an FBO. I have a multisampled FBO (fbo[0]) which I use to render the scene, and a non-multisampled FBO (fbo[1]) which I use for color picking.
The problem is: when I try to read pixel data from fbo[1], everything goes well until the glReadPixels call, which sets the GL_INVALID_OPERATION flag. I've checked the manual and can't find the reason why.
The code to create FBO:
glBindRenderbuffer(GL_RENDERBUFFER, rbo[0]);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, numSamples, GL_RGBA8, resolution[0], resolution[1]);
glBindRenderbuffer(GL_RENDERBUFFER, rbo[1]);
glRenderbufferStorageMultisample(GL_RENDERBUFFER, numSamples, GL_DEPTH24_STENCIL8, resolution[0], resolution[1]);
glBindRenderbuffer(GL_RENDERBUFFER, rbo[2]);
glRenderbufferStorage(GL_RENDERBUFFER, GL_R32UI, resolution[0], resolution[1]);
glBindRenderbuffer(GL_RENDERBUFFER, rbo[3]);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, resolution[0], resolution[1]);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo[1]);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo[3]);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rbo[2]);
OGLChecker::checkFBO(GL_DRAW_FRAMEBUFFER);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo[0]);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo[1]);
glFramebufferRenderbuffer(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, rbo[0]);
OGLChecker::checkFBO(GL_DRAW_FRAMEBUFFER);
My checker stays silent, so the FBOs are complete. Next, the picking code:
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, fbo[1]);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT );
// bla, bla, bla
// do the rendering
unsigned int result;
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, fbo[1]);
int sb;
glReadBuffer(GL_COLOR_ATTACHMENT0);
glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
glGetIntegerv(GL_SAMPLE_BUFFERS, &sb);
// glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
OGLChecker::getGlError();
std::cerr << "Sample buffers " << sb << std::endl;
glReadPixels(pos.x(), resolution.y() - pos.y(), 1, 1, GL_RED, GL_UNSIGNED_INT, &result);
OGLChecker::getGlError();
return result;
the output:
Sample buffers 0
OpenGL Error : Invalid Operation
The interesting fact is that if I uncomment glBindFramebuffer(GL_READ_FRAMEBUFFER, 0); then no error happens and pixels are read from the screen (but I don't need this).
What may be wrong here?
Your problem is the format parameter. For an image with a one-channel integer internal format, the correct parameter isn't GL_RED, but GL_RED_INTEGER:
glReadPixels(pos.x(), resolution.y() - pos.y(), 1, 1, GL_RED_INTEGER, GL_UNSIGNED_INT, &result);
Look at the OpenGL documentation wiki (emphasis mine):
...
format
Specifies the format of the pixel data. For transfers of depth, stencil, or depth/stencil data, you must use GL_DEPTH_COMPONENT, GL_STENCIL_INDEX, or GL_DEPTH_STENCIL, where appropriate. For transfers of normalized integer or floating-point color image data, you must use one of the following: GL_RED, GL_GREEN, GL_BLUE, GL_RG, GL_RGB, GL_BGR, GL_RGBA, and GL_BGRA. For transfers of non-normalized integer data, you must use one of the following: GL_RED_INTEGER, GL_GREEN_INTEGER, GL_BLUE_INTEGER, GL_RG_INTEGER, GL_RGB_INTEGER, GL_BGR_INTEGER, GL_RGBA_INTEGER, and GL_BGRA_INTEGER. Even if no actual pixel transfer is made (data is NULL and no buffer is bound to GL_PIXEL_UNPACK_BUFFER), you must set this parameter correctly for the internal format of the destination image.
...
Note: the official reference page is incomplete/wrong.
Given that it's "fixed" if you uncomment that line of code, I wonder if your driver is lying to you about GL_SAMPLE_BUFFERS being 0. From http://www.opengl.org/sdk/docs/man/xhtml/glReadPixels.xml:
GL_INVALID_OPERATION is generated if GL_READ_FRAMEBUFFER_BINDING is non-zero, the read framebuffer is complete, and the value of GL_SAMPLE_BUFFERS for the read framebuffer is greater than zero.
If you're using NVIDIA's binary driver on Linux and have switched to a non-graphical virtual console (e.g. CTRL+ALT+F1) then any attempt to glReadPixels() will return GL_INVALID_OPERATION (0x502).
Solution: Switch back to the graphical console (usually CTRL+ALT+F7).

Problems outputting gl_PrimitiveID to custom frame buffer object (FBO)

I have a very basic fragment shader from which I want to output 'gl_PrimitiveID' to a framebuffer object (FBO) that I have defined. Below is my fragment shader:
#version 150
uniform vec4 colorConst;
out vec4 fragColor;
out uvec4 triID;
void main(void)
{
    fragColor = colorConst;
    triID.r = uint(gl_PrimitiveID);
}
I setup my FBO like this:
GLuint renderbufId0;
GLuint renderbufId1;
GLuint depthbufId;
GLuint framebufId;
// generate render and frame buffer objects
glGenRenderbuffers( 1, &renderbufId0 );
glGenRenderbuffers( 1, &renderbufId1 );
glGenRenderbuffers( 1, &depthbufId );
glGenFramebuffers ( 1, &framebufId );
// setup first renderbuffer (fragColor)
glBindRenderbuffer(GL_RENDERBUFFER, renderbufId0);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGBA, gViewWidth, gViewHeight);
// setup second renderbuffer (triID)
glBindRenderbuffer(GL_RENDERBUFFER, renderbufId1);
glRenderbufferStorage(GL_RENDERBUFFER, GL_RGB32UI, gViewWidth, gViewHeight);
// setup depth buffer
glBindRenderbuffer(GL_RENDERBUFFER, depthbufId);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT32, gViewWidth, gViewHeight);
// setup framebuffer
glBindFramebuffer(GL_FRAMEBUFFER, framebufId);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_RENDERBUFFER, renderbufId0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT1, GL_RENDERBUFFER, renderbufId1);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthbufId );
// check if everything went well
GLenum stat = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if(stat != GL_FRAMEBUFFER_COMPLETE) { exit(0); }
// setup color attachments
const GLenum att[] = {GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1};
glDrawBuffers(2, att);
// render mesh
RenderMyMesh()
// copy second color attachment (triID) to local buffer
glReadBuffer(GL_COLOR_ATTACHMENT1);
glReadPixels(0, 0, gViewWidth, gViewHeight, GL_RED, GL_UNSIGNED_INT, data);
For some reason glReadPixels gives me a 'GL_INVALID_OPERATION' error. However, if I change the internal format of renderbufId1 from 'GL_RGB32UI' to 'GL_RGB' and use 'GL_FLOAT' in glReadPixels instead of 'GL_UNSIGNED_INT', then everything works fine. Does anyone know why I am getting the 'GL_INVALID_OPERATION' error and how I can solve it?
Is there an alternative way of outputting 'gl_PrimitiveID'?
PS: The reason I want to output 'gl_PrimitiveID' like this is explained here: Picking triangles in OpenGL core profile when using glDrawElements
glReadPixels(0, 0, gViewWidth, gViewHeight, GL_RED, GL_UNSIGNED_INT, data);
As stated on the OpenGL Wiki, you need to use GL_RED_INTEGER when transferring true integer data. Otherwise, OpenGL will try to use floating-point conversion on it.
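So the read of the integer color attachment would look like this (same variables as in the question):
glReadBuffer( GL_COLOR_ATTACHMENT1 );
// GL_RED_INTEGER matches the non-normalized integer internal format (GL_RGB32UI) of renderbufId1
glReadPixels( 0, 0, gViewWidth, gViewHeight, GL_RED_INTEGER, GL_UNSIGNED_INT, data );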
BTW, make sure you're using glBindFragDataLocation to set up which buffers those fragment shader outputs go to. Alternatively, you can set it up explicitly in the shader if you're using GLSL 3.30 or above.
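A sketch of both options, assuming a hypothetical program handle prog and the output names from the shader above:
// Option 1: bind the outputs from the application, before linking the program.
glBindFragDataLocation( prog, 0, "fragColor" ); // draw buffer 0 -> GL_COLOR_ATTACHMENT0 (renderbufId0)
glBindFragDataLocation( prog, 1, "triID" );     // draw buffer 1 -> GL_COLOR_ATTACHMENT1 (renderbufId1)
glLinkProgram( prog );
// Option 2 (GLSL 3.30+): declare the locations in the shader instead:
//   layout(location = 0) out vec4 fragColor;
//   layout(location = 1) out uvec4 triID;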