I'm using OpenGL to draw the graphics for a simple game like Space Invaders. So far I have it rendering moving meteorites and a GIF file quite nicely. I get the basics. But I just can't get the framebuffer working properly, which I intend to render a bitmap font to.
The first function will be called inside the render function only when the score changes; this will produce a texture containing the score characters. The second function will draw the texture containing the bitmap font characters to the screen every time the render function is called. I thought this would be a more efficient way to draw the score. Right now I'm just trying to get it drawing a square using the framebuffer, but it seems that the coordinates range from -1 to 0. I thought the coordinates for a texture went from 0 to 1? I commented which vertex affects which corner of the square, and it seems to be wrong.
void Score::UpdateScoreTexture(int* success)
{
int length = 8;
char* chars = LongToNumberDigits(count, &length, 0);
glDeleteTextures(1, &textureScore);//last texture containing previous score deleted to make room for new score
glGenTextures(1, &textureScore);
GLuint frameBufferScore;
glBindTexture(GL_TEXTURE_2D, textureScore);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 256, 256, 0, GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glGenFramebuffers(1, &frameBufferScore);
glBindFramebuffer(GL_FRAMEBUFFER, frameBufferScore);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, textureScore, 0);
GLenum status;
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
std::cout << "status is: ";
std::cout << "\n";
switch (status)
{
case GL_FRAMEBUFFER_COMPLETE:
std::cout << "good";
break;
default:
PrintGLStatus(status);
while (1 == 1);
}
glBindFramebuffer(GL_FRAMEBUFFER, frameBufferScore);
glBegin(GL_POLYGON);
glVertex3f(-1, -1, 0.0);//appears to be the bottom left,
glVertex3f(0, -1, 0.0);//appears to be the bottom right
glVertex3f(0, 0, 0.0);//appears to be the top right
glVertex3f(-1, 0, 0.0);//appears to be the top left
glEnd();
glDisable(GL_TEXTURE_2D);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindTexture(GL_TEXTURE_2D,0);
glDeleteFramebuffers(1, &frameBufferScore);
//glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, chars2);
}
void Score::DrawScore(void)
{
glEnable(GL_TEXTURE_2D);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glBindTexture(GL_TEXTURE_2D, textureScore);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(0.7, 0.925, 0.0);
glTexCoord2f(0.0, 1.0); glVertex3f(0.7, 0.975, 0.0);
glTexCoord2f(1.0, 1.0); glVertex3f(0.975, 0.975, 0.0);
glTexCoord2f(1.0, 0.0); glVertex3f(0.975, 0.925, 0.0);
glEnd();
glFlush();
glDisable(GL_TEXTURE_2D);
}
Any ideas where I'm going wrong?
You have not set the viewport with glViewport; this may give you problems.
Another possibility is that you have a matrix set to something other than identity.
Ensure that you have reset the model-view and projection matrices to identity (or whatever you want them to be) before glBegin(GL_POLYGON) in UpdateScoreTexture(). You may wish to push the matrices onto the stack before you make changes:
glViewport(0, 0, framebufferWidth, framebufferHeight);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
Then put them back at the end of the function:
glViewport(0, 0, width, height);
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
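Putting the two together, a minimal sketch of the drawing section inside UpdateScoreTexture() might look like this (only a sketch: it assumes the 256x256 texture size from the question, frameBufferScore already bound, and uses a prevViewport array that is not in the original code):
// Sketch: render into the 256x256 score texture with viewport and
// matrices set up for the FBO, then restore the previous state.
GLint prevViewport[4];
glGetIntegerv(GL_VIEWPORT, prevViewport);      // remember the window viewport
glViewport(0, 0, 256, 256);                    // match the FBO texture size
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glBegin(GL_POLYGON);                           // now -1..1 spans the whole texture
glVertex3f(-1.0f, -1.0f, 0.0f);
glVertex3f( 1.0f, -1.0f, 0.0f);
glVertex3f( 1.0f,  1.0f, 0.0f);
glVertex3f(-1.0f,  1.0f, 0.0f);
glEnd();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
glViewport(prevViewport[0], prevViewport[1], prevViewport[2], prevViewport[3]);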
I am trying to use deferred shading to implement SSAO and I have problems accessing my textures in the deferred fragment shader. The code is in C++/Qt5 and makes use of Coin3D to generate the rest of the UI (but this shouldn't really matter here).
The fragment shader of the deferred pass is:
#version 150 compatibility
uniform sampler2D color;
uniform sampler2D position;
uniform sampler2D normal;
uniform vec3 dim;
uniform vec3 camPos;
uniform vec3 camDir;
void main()
{
// screen position
vec2 t = gl_TexCoord[0].st;
// the color
vec4 c = texture2D(color, t);
gl_FragColor = c + vec4(1.0, t.x, t.y, 1.0);
}
The code for running the deferred pass is
_geometryBuffer.Unbind();
// push state
{
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glPushAttrib(GL_DEPTH_BUFFER_BIT |
GL_COLOR_BUFFER_BIT |
GL_LIGHTING_BIT |
GL_SCISSOR_BIT |
GL_POLYGON_BIT |
GL_CURRENT_BIT);
glDisable(GL_DEPTH_TEST);
glDisable(GL_ALPHA_TEST);
glDisable(GL_LIGHTING);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_SCISSOR_TEST);
glDisable(GL_CULL_FACE);
}
// bind shader
// /!\ IMPORTANT to do before specifying locations
_deferredShader->bind();
_CheckGLErrors("deferred");
// specify positions
_deferredShader->setUniformValue("camPos", ...);
_deferredShader->setUniformValue("camDir", ...);
_geometryBuffer.Bind(GBuffer::TEXTURE_TYPE_NORMAL, 2);
_deferredShader->setUniformValue("normal", GLint(2));
_geometryBuffer.Bind(GBuffer::TEXTURE_TYPE_POSITION, 1);
_deferredShader->setUniformValue("position", GLint(1));
_geometryBuffer.Bind(GBuffer::TEXTURE_TYPE_DIFFUSE, 0);
_deferredShader->setUniformValue("color", GLint(0));
_CheckGLErrors("bind");
// draw screen quad
{
glBegin(GL_QUADS);
glTexCoord2f(0, 0);
glColor3f(0, 0, 0);
glVertex2f(-1, -1);
glTexCoord2f(1, 0);
glColor3f(0, 0, 0);
glVertex2f( 1, -1);
glTexCoord2f(1, 1);
glColor3f(0, 0, 0);
glVertex2f( 1, 1);
glTexCoord2f(0, 1);
glColor3f(0, 0, 0);
glVertex2f(-1, 1);
glEnd();
}
_deferredShader->release();
// for debug
_geometryBuffer.Unbind(2);
_geometryBuffer.Unbind(1);
_geometryBuffer.Unbind(0);
_geometryBuffer.DeferredPassBegin();
_geometryBuffer.DeferredPassDebug();
// pop state
{
glPopAttrib();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glMatrixMode(GL_MODELVIEW);
glPopMatrix();
}
I know that the textures have been correctly processed in the geometry buffer creation because I can dump them into files and get the expected result.
The deferred pass doesn't work. The shader compiles correctly, and the result I get on screen is shown in the screenshot.
And the last part of my code (DeferredPassBegin/Debug) draws the FBO to the screen (as shown in the screenshot) as proof that the GBuffer is correct.
The current result seems to mean that the textures are not correctly bound to their respective uniforms, but I know that the content is valid, as I dumped the textures to files and got the same results as shown above.
My binding functions in GBuffer are:
void GBuffer::Unbind()
{
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, 0);
}
void GBuffer::Bind(TextureType type, uint32_t idx)
{
glActiveTexture(GL_TEXTURE0 + idx);
glEnable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, _textures[static_cast<uint32_t>(type)]);
}
void GBuffer::Unbind(uint32_t idx)
{
glActiveTexture(GL_TEXTURE0 + idx);
glDisable(GL_TEXTURE_2D);
glBindTexture(GL_TEXTURE_2D, 0);
}
Finally, the textures are 512/512, and I created them in my GBuffer with:
WindowWidth = WindowHeight = 512;
// Create the FBO
glGenFramebuffers(1, &_fbo);
glBindFramebuffer(GL_FRAMEBUFFER, _fbo);
const uint32_t NUM = static_cast<uint32_t>(NUM_TEXTURES);
// Create the gbuffer textures
glGenTextures(NUM, _textures);
glGenTextures(1, &_depthTexture);
for (unsigned int i = 0 ; i < NUM; i++) {
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WindowWidth, WindowHeight, 0, GL_RGBA, GL_FLOAT, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i + _firstIndex, GL_TEXTURE_2D, _textures[i], 0);
}
// depth
glBindTexture(GL_TEXTURE_2D, _depthTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, WindowWidth, WindowHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, _depthTexture, 0);
GLenum buffers[NUM];
for(uint32_t i = 0; i < NUM; ++i){
buffers[i] = GLenum(GL_COLOR_ATTACHMENT0 + i + _firstIndex);
}
glDrawBuffers(NUM, buffers);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
printf("FB error, status: 0x%x\n", status);
return _valid = false;
}
// unbind textures
glBindTexture(GL_TEXTURE_2D, 0);
// restore default FBO
glBindFramebuffer(GL_FRAMEBUFFER, 0);
How can I debug further at this stage?
I know that the texture data is valid, but I can't seem to bind it to the shader correctly (though I have other shaders that use textures loaded from files, and those work fine).
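One low-level check worth trying (a hedged debugging sketch, not part of the original code): right after _deferredShader->bind(), query the bound program and the sampler locations with raw GL calls to confirm the uniforms actually resolve:
// Sketch: verify the sampler uniforms exist in the currently bound program.
GLint prog = 0;
glGetIntegerv(GL_CURRENT_PROGRAM, &prog);               // id of the bound program
GLint locColor    = glGetUniformLocation(prog, "color");
GLint locPosition = glGetUniformLocation(prog, "position");
GLint locNormal   = glGetUniformLocation(prog, "normal");
printf("program %d: color=%d position=%d normal=%d\n",
       prog, locColor, locPosition, locNormal);          // -1 means not found/not active
GLenum err = glGetError();
if (err != GL_NO_ERROR)
    printf("GL error after binding: 0x%x\n", err);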
--- Edit 1 ---
As asked, here is the code for DeferredPassBegin/Debug (mostly coming from this tutorial):
void GBuffer::DeferredPassBegin() {
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindFramebuffer(GL_READ_FRAMEBUFFER, _fbo);
}
void GBuffer::DeferredPassDebug() {
GLsizei HalfWidth = GLsizei(_texWidth / 2.0f);
GLsizei HalfHeight = GLsizei(_texHeight / 2.0f);
SetReadBuffer(TEXTURE_TYPE_POSITION);
glBlitFramebuffer(0, 0, _texWidth, _texHeight,
0, 0, HalfWidth, HalfHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
SetReadBuffer(TEXTURE_TYPE_DIFFUSE);
glBlitFramebuffer(0, 0, _texWidth, _texHeight,
0, HalfHeight, HalfWidth, _texHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
SetReadBuffer(TEXTURE_TYPE_NORMAL);
glBlitFramebuffer(0, 0, _texWidth, _texHeight,
HalfWidth, HalfHeight, _texWidth, _texHeight, GL_COLOR_BUFFER_BIT, GL_LINEAR);
}
Arghk!!!
I assumed that texture parameters were not mandatory, but after looking at some other code I tried specifying them explicitly. When generating the FBO textures, I now use:
for (unsigned int i = 0 ; i < NUM; i++) {
glBindTexture(GL_TEXTURE_2D, _textures[i]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, WindowWidth, WindowHeight, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + i + _firstIndex, GL_TEXTURE_2D, _textures[i], 0);
}
And with this change, I get the expected result (with only c in the fragment shader, and similarly correct results if I switch to visualizing the normal / position).
Conclusion: one must specify the texture parameters for deferred shading to work (at least with the graphics setup of my application / machine). In hindsight this makes sense: the default GL_TEXTURE_MIN_FILTER is GL_NEAREST_MIPMAP_LINEAR, so a texture without a complete mipmap chain is not texture-complete, and sampling an incomplete texture in a shader does not return the attachment's data; setting the min filter to GL_LINEAR or GL_NEAREST avoids that.
I've been trying to load different textures into the GL_TEXTUREx texture units and then assign them to different spheres, but so far I'm having problems. I tried some of the suggestions in posts like this and this, but couldn't solve it.
This is part of my code:
GLuint textures[2];
void LoadTextures(std::string const& dirname)
{
glGenTextures(2, textures);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[0]);
Image_t sun = loadPNG(std::string(dirname + "/sun.png"));
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 1024, 512, 0, GL_RGB, GL_UNSIGNED_BYTE, &(sun.data[0]));
glBindTexture(GL_TEXTURE_2D, textures[0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textures[1]);
Image_t mercury = loadPNG(std::string(dirname + "/mercury.png"));
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 1024, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, &(mercury.data[0]));
}
and
void draw(void)
{
glEnable (GL_TEXTURE_2D);
glBindTexture (GL_TEXTURE_2D, textures[0]);
glPushMatrix();
glTranslatef(0.4,0,0);
gluSphere(sun, 0.5, 36, 36); //I want this sphere to use the texture GL_TEXTURE0
glPopMatrix();
glDisable(GL_TEXTURE_2D);
}
You need to call glBindTexture just before performing a draw call:
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[0]);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textures[1]);
draw_things();
In the above example, whatever you draw with draw_things will have access to both textures 0 and 1 (GL_TEXTURE0 and GL_TEXTURE1).
Now, if what you want is to draw many things, but each with a different texture, then you need to:
glActiveTexture(GL_TEXTURE0); // activate any texture unit
glEnable(GL_TEXTURE_2D); // make sure texturing is enabled
glBindTexture(GL_TEXTURE_2D, thing1_texture);
draw_thing1();
glBindTexture(GL_TEXTURE_2D, thing2_texture);
draw_thing2();
// et cetera
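Applied to the question's code, a sketch of draw() under that second pattern could look like this (the mercury quadric, its position, and its radius are hypothetical placeholders; it also assumes gluQuadricTexture(..., GL_TRUE) was called on each quadric so gluSphere generates texture coordinates):
// Sketch: one texture unit, rebinding before each sphere.
void draw(void)
{
    glEnable(GL_TEXTURE_2D);
    glActiveTexture(GL_TEXTURE0);                // a single unit is enough here
    glBindTexture(GL_TEXTURE_2D, textures[0]);   // sun texture
    glPushMatrix();
    glTranslatef(0.4f, 0.0f, 0.0f);
    gluSphere(sun, 0.5, 36, 36);
    glPopMatrix();
    glBindTexture(GL_TEXTURE_2D, textures[1]);   // mercury texture
    glPushMatrix();
    glTranslatef(-0.4f, 0.0f, 0.0f);             // hypothetical position
    gluSphere(mercury, 0.3, 36, 36);             // hypothetical quadric and radius
    glPopMatrix();
    glDisable(GL_TEXTURE_2D);
}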
I have been trying to render a teapot to an FBO and then use the resulting texture as a texture map. Nothing seems to come up, so I was wondering what I was doing wrong. Below is the main loop:
//switch to fbo
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glLoadIdentity();
gluLookAt(0.0,0.0,5.0,
0.0,0.0,-1.0,
0.0f,1.0f,0.0f);
glLightfv(GL_LIGHT0, GL_POSITION, lpos);
glRotatef(a,0,1,1);
glutSolidTeapot(1);
//switch to main
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glLoadIdentity();
gluLookAt(0.0,0.0,5.0,
0.0,0.0,-1.0,
0.0f,1.0f,0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBindTexture(GL_TEXTURE_2D, img);
//draw cube
glColor4f(1, 1, 1, 1);
glBegin(GL_TRIANGLES);
// front faces
glNormal3f(0,0,1);
// face v0-v1-v2
glTexCoord2f(1,1); glVertex3f(1,1,1);
glTexCoord2f(0,1); glVertex3f(-1,1,1);
glTexCoord2f(0,0); glVertex3f(-1,-1,1);
...draws cube
I have tried using glCheckFramebufferStatus and it has returned "success":
glGenTextures(1, &img);
glBindTexture(GL_TEXTURE_2D, img);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 512, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glGenFramebuffers(1,&fbo);
glBindFramebuffer(GL_FRAMEBUFFER,fbo);
glGenRenderbuffers(1, &depthbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, 512, 512);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthbuffer);
glGenTextures(1, &img);
glBindTexture(GL_TEXTURE_2D, img);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, 512, 512, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, img, 0);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status==GL_FRAMEBUFFER_COMPLETE) {
printf("success\n");
}
However, all I get is the cube with no texture mapping.
There are two possible errors here:
Rendering into the FBO fails. To check if this is the case, clear the FBO with a distinct background color (e.g. pink) and see if the texture is affected (by reading it back to the CPU, for example). You should also make sure that your transformation matrices and the viewport (glViewport) are correct.
Texture mapping fails. Make sure that you've got everything set up for texture mapping and test texture mapping with a static texture first.
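For the first check, a hedged sketch (using the fbo handle from the question; the exact readback spot is just an example): clear the FBO to a distinct pink and read one pixel back to confirm the clear actually lands in the attachment:
// Sketch: verify that rendering reaches the FBO at all.
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glClearColor(1.0f, 0.0f, 1.0f, 1.0f);           // distinct pink
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
unsigned char pixel[4] = {0, 0, 0, 0};
glReadPixels(0, 0, 1, 1, GL_RGBA, GL_UNSIGNED_BYTE, pixel);  // reads from the bound FBO
printf("FBO pixel: %u %u %u %u\n", pixel[0], pixel[1], pixel[2], pixel[3]);
// Expect 255 0 255 255 if the FBO is really being written.
glBindFramebuffer(GL_FRAMEBUFFER, 0);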
What about glEnable(GL_TEXTURE_2D)?
I have some code in OpenGL to render a YUV image onto an OpenGL viewport. The program works without a problem on NVIDIA cards, but it generates an error when running on the Intel HD 3000, which sadly is the target machine. The point where the error is generated is marked in the code.
The shader programs are:
// Vertex Shader
#version 120
void main() {
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
}
// fragment shader
#version 120
uniform sampler2D texY;
uniform sampler2D texU;
uniform sampler2D texV;
void main() {
vec4 color;
float y = texture2D(texY, gl_TexCoord[0].st).r;
float u = texture2D(texU, gl_TexCoord[0].st).r;
float v = texture2D(texV, gl_TexCoord[0].st).r;
color.r = (1.164 * (y - 0.0625)) + (1.596 * (v - 0.5));
color.g = (1.164 * (y - 0.0625)) - (0.391 * (u - 0.5)) - (0.813 * (v - 0.5));
color.b = (1.164 * (y - 0.0625)) + (2.018 * (u - 0.5));
color.a = 1.0;
gl_FragColor = color;
}
Then I run the program like this:
GLuint textures[3];
glGenTextures(3, textures);
glBindTexture(GL_TEXTURE_2D, textures[YTEX]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, textures[UTEX]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, textures[VTEX]);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glBindTexture(GL_TEXTURE_2D, 0);
GLsizei size = width * height;
GLvoid *y = yuv_buffer;
GLvoid *u = (GLubyte *)y + size;
GLvoid *v = (GLubyte *)u + (size >> 2);
glUseProgram(program_id);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, textures[0]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width, height, 0, GL_LUMINANCE,
GL_UNSIGNED_BYTE, y);
glUniform1i(glGetUniformLocation(program_id, "texY"), 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, textures[1]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width >> 1, height >> 1, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, u);
glUniform1i(glGetUniformLocation(program_id, "texU"), 1);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, textures[2]);
glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE, width >> 1, height >> 1, 0,
GL_LUMINANCE, GL_UNSIGNED_BYTE, v);
glUniform1i(glGetUniformLocation(program_id, "texV"), 2);
glBegin(GL_QUADS);
glTexCoord2f(texLeft, texTop);
glVertex2i(left, top);
glTexCoord2f(texLeft, texBottom);
glVertex2i(left, bottom);
glTexCoord2f(texRight, texBottom);
glVertex2i(right, bottom);
glTexCoord2f(texRight, texTop);
glVertex2i(right, top);
glEnd();
// glGetError() returns 0x506 (GL_INVALID_FRAMEBUFFER_OPERATION) here
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE0);
glUseProgram(0);
Update: since the error happens with framebuffers, here is how they are used.
When the program is instantiated, a framebuffer is created like this:
glViewport(0, 0, (GLint)width, (GLint)height);
glGenFramebuffers(1, &fbo_id);
glGenTextures(1, &fbo_texture);
glGenRenderbuffers(1, &rbo_id);
glBindTexture(GL_TEXTURE_2D, fbo_texture);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_TRUE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0,
GL_RGBA, GL_UNSIGNED_BYTE, 0);
glBindTexture(GL_TEXTURE_2D, 0);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, rbo_id);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, width, height);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo_id);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
GL_TEXTURE_2D, fbo_texture, 0);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
GL_RENDERBUFFER_EXT, rbo_id);
GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0);
glPushAttrib(GL_TEXTURE_BIT);
glBindTexture(GL_TEXTURE_2D, m_frameTexture->texture());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glPopAttrib();
The YUV image comes split into tiles, which are assembled by rendering into this FBO. Whenever a frame starts, this is performed:
glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0);
glDrawBuffer(GL_BACK);
glViewport(0, 0, (GLint)width, (GLint)height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0, (double)width, 0.0, (double)height, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glBindFramebuffer(GL_FRAMEBUFFER_EXT, fbo_id);
Then the code above is executed, and after all the tiles have been assembled together:
glBindFramebuffer(GL_FRAMEBUFFER_EXT, 0);
glPushAttrib(GL_VIEWPORT_BIT | GL_TEXTURE_BIT | GL_ENABLE_BIT);
glViewport(0, 0, width, height);
glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glPushMatrix();
glLoadIdentity();
glOrtho(0.0, (double)width, 0.0, (double)height, -1.0, 1.0);
glMatrixMode(GL_MODELVIEW);
glPushMatrix();
glLoadIdentity();
glEnable(GL_TEXTURE_2D);
glDisable(GL_DEPTH_TEST);
glBindTexture(GL_TEXTURE_2D, fbo_texture);
glBegin(GL_QUADS);
glTexCoord2i(0, 0);
glVertex2f(renderLeft, renderTop);
glTexCoord2i(0, 1);
glVertex2f(renderLeft, renderTop + renderHeight);
glTexCoord2i(1, 1);
glVertex2f(renderLeft + renderWidth, renderTop + renderHeight);
glTexCoord2i(1, 0);
glVertex2f(renderLeft + renderWidth, renderTop);
glEnd();
glPopMatrix();
glMatrixMode(GL_PROJECTION);
glPopMatrix();
glPopAttrib();
What's the value of status after:
GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
If the value is anything other than GL_FRAMEBUFFER_COMPLETE, OpenGL will probably choke when it tries to read from the FBO.
The glCheckFramebufferStatus docs describe the other (error) values it can return, and what causes them.
Of particular interest might be:
If the currently bound framebuffer is not framebuffer complete, then it is an error to attempt to use the framebuffer for writing or reading. This means that rendering commands (glDrawArrays and glDrawElements) as well as commands that read the framebuffer (glReadPixels, glCopyTexImage2D, and glCopyTexSubImage2D) will generate the error GL_INVALID_FRAMEBUFFER_OPERATION if called while the framebuffer is not framebuffer complete.
(emphasis mine)
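A small helper along these lines can make the status easier to read (a sketch; the function name is a placeholder and the enum coverage is partial):
// Sketch: map a framebuffer status code to a readable name.
const char* FramebufferStatusName(GLenum status)
{
    switch (status) {
    case GL_FRAMEBUFFER_COMPLETE:                      return "GL_FRAMEBUFFER_COMPLETE";
    case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:         return "GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT";
    case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT: return "GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";
    case GL_FRAMEBUFFER_UNSUPPORTED:                   return "GL_FRAMEBUFFER_UNSUPPORTED";
    default:                                           return "other (see glCheckFramebufferStatus docs)";
    }
}
// Usage:
//   GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
//   printf("FBO status: %s (0x%x)\n", FramebufferStatusName(status), status);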
Edit, based on your comments:
To paraphrase the docs wrt GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
Not all framebuffer attachment points are framebuffer attachment complete.
This means that one of the following is happening:
At least one attachment point with a renderbuffer or texture attached has its attached object no longer in existence or has an attached image with a width or height of zero,
The color attachment point has a non-color-renderable image attached. Color-renderable formats include GL_RGBA4, GL_RGB5_A1, and GL_RGB565.
The depth attachment point has a non-depth-renderable image attached. GL_DEPTH_COMPONENT16 is the only depth-renderable format.
The stencil attachment point has a non-stencil-renderable image attached. GL_STENCIL_INDEX8 is the only stencil-renderable format.
We can rule out the last two bullets, because it doesn't appear that you're using depth or stencil attachments. That leaves two calls to examine:
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, fbo_texture, 0);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, GL_RENDERBUFFER_EXT, rbo_id);
From the opengl.org wiki on FBOs:
You get GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT when any of the attachments are 'incomplete'. Criteria for completeness are:
The source object for the image still exists and has the same type it was attached with.
The image has a non-zero width and height.
The layer for 3D or array textures attachments is less than the depth of the texture.
The image's format must match the attachment point's requirements, as defined above. Color-renderable formats for color attachments, etc.
The wiki says of GL_COLOR_ATTACHMENTi:
These attachment points can only have images bound to them with color-renderable formats. All compressed image formats are not color-renderable, and thus cannot be attached to an FBO.
Double check that the fbo_texture and rbo_id are still valid, and that their height/width aren't 0. Finally, it could be fbo_texture's format. You've got it set to GL_RGBA8, but the docs say valid options include GL_RGBA4, GL_RGB5_A1, and GL_RGB565. I'm not sure whether or not that excludes all other formats (like your GL_RGBA8). The wiki seems to suggest that any non-compressed format should work. Try switching it to GL_RGBA4, and see if that works out.
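A quick way to do those checks, as a sketch (assuming fbo_texture and rbo_id from the question are in scope):
// Sketch: confirm the attachments still exist and have non-zero size.
if (!glIsTexture(fbo_texture))
    printf("fbo_texture is not a valid texture object\n");
if (!glIsRenderbuffer(rbo_id))
    printf("rbo_id is not a valid renderbuffer object\n");
GLint w = 0, h = 0, fmt = 0;
glBindTexture(GL_TEXTURE_2D, fbo_texture);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_WIDTH,  &w);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_HEIGHT, &h);
glGetTexLevelParameteriv(GL_TEXTURE_2D, 0, GL_TEXTURE_INTERNAL_FORMAT, &fmt);
glBindTexture(GL_TEXTURE_2D, 0);
printf("color attachment: %dx%d, internal format 0x%x\n", w, h, fmt);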
glGetError error codes "stick" and are not automatically cleared. If something at the beginning of your program generates an OpenGL error and you check for the error code 1000 OpenGL calls later, the error will still be there.
So if you want to understand what's REALLY going on, check for errors after every OpenGL call, or call glGetError in a loop until it returns GL_NO_ERROR (as the OpenGL documentation suggests).
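For example, a tiny helper along those lines (a sketch; the helper name and label string are placeholders):
// Sketch: drain all pending error codes so they don't "stick" to a later call.
void CheckGLErrors(const char* label)
{
    GLenum err;
    while ((err = glGetError()) != GL_NO_ERROR)
        printf("GL error at %s: 0x%x\n", label, err);
}
// Call it after every suspicious GL call, e.g.:
//   glTexImage2D(...);
//   CheckGLErrors("glTexImage2D");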
I solved the problem. It was an extension-mixing problem which made the renderbuffer object disappear. I basically changed this:
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, rbo_id);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT, width, height);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, 0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo_id);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
GL_TEXTURE_2D, fbo_texture, 0);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
GL_RENDERBUFFER_EXT, rbo_id);
GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);
to this:
glBindRenderbuffer(GL_RENDERBUFFER, rbo_id);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width, height);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
glBindFramebuffer(GL_FRAMEBUFFER, fbo_id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, fbo_texture, 0);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
GL_RENDERBUFFER, rbo_id);
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
and then it worked. I still wonder exactly what the problem was, but so far I am happy with the result. Special thanks to @luke, whose answer helped locate the exact point of the problem.
Exactly which command raises the error? Try replacing GL_QUADS with GL_TRIANGLE_FAN.
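For reference, the quad from the question drawn as a triangle fan keeps the same vertex order (a sketch, reusing the question's coordinate variables):
// Sketch: same four vertices, issued as a triangle fan instead of a quad.
glBegin(GL_TRIANGLE_FAN);
glTexCoord2f(texLeft,  texTop);    glVertex2i(left,  top);
glTexCoord2f(texLeft,  texBottom); glVertex2i(left,  bottom);
glTexCoord2f(texRight, texBottom); glVertex2i(right, bottom);
glTexCoord2f(texRight, texTop);    glVertex2i(right, top);
glEnd();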