I'm trying to display some text using OpenGL with the FreeType library. It works, but the text doesn't look smooth. The FreeType documentation says some antialiasing happens to the glyph bitmap during loading, but it doesn't look that way in my case.
This is what I'm doing:
// Initialize FreeType and load the face at a pixel size of 80
FT_Init_FreeType(&m_fontLibrary);
FT_New_Face(m_fontLibrary, "src/VezusLight.OTF", 0, &m_BFont);
FT_Set_Pixel_Sizes(m_BFont, 0, 80);
m_glyph = m_BFont->glyph;
// Create the glyph texture on texture unit 1
GLuint tex;
glActiveTexture(GL_TEXTURE1);
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // glyph bitmaps are tightly packed, 1 byte per pixel
// Point the sampler uniform at texture unit 1
glUseProgram(m_textPipeline);
glUniform1i(m_texLocation, 1);
glUseProgram(0);
and then rendering:
glActiveTexture(GL_TEXTURE1);
glEnableVertexAttribArray(m_coordTex);
glBindBuffer(GL_ARRAY_BUFFER, m_VBO);
const char *p;
float x = x_i, y = y_i;
const char *result = text.c_str();
for (p = result; *p; p++)
{
    // Render the glyph and upload its 8-bit coverage bitmap
    if (FT_Load_Char(m_BFont, *p, FT_LOAD_RENDER))
        continue;
    glTexImage2D(
        GL_TEXTURE_2D,
        0,
        GL_ALPHA,
        m_glyph->bitmap.width,
        m_glyph->bitmap.rows,
        0,
        GL_ALPHA,
        GL_UNSIGNED_BYTE,
        m_glyph->bitmap.buffer
    );
    // Each vertex packs position (x, y) and texture coordinates (s, t) into one vec4
    float x2 = x - 1024 + m_glyph->bitmap_left;
    float y2 = y - 600 - m_glyph->bitmap_top;
    float w = m_glyph->bitmap.width;
    float h = m_glyph->bitmap.rows;
    GLfloat box[4][4] = {
        { x2,     -y2 - h, 0, 1 },
        { x2 + w, -y2 - h, 1, 1 },
        { x2,     -y2,     0, 0 },
        { x2 + w, -y2,     1, 0 },
    };
    glBufferData(GL_ARRAY_BUFFER, 16 * sizeof(GLfloat), box, GL_DYNAMIC_DRAW);
    glVertexAttribPointer(m_coordTex, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(GLfloat), NULL);
    glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
    // advance is in 26.6 fixed point (1/64 pixel units), hence the shift
    x += (m_glyph->advance.x >> 6);
    y += (m_glyph->advance.y >> 6);
}
glDisableVertexAttribArray(m_coordTex);
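For context, a setup like this usually pairs with a shader along these lines (a sketch of the typical arrangement, not the asker's actual code; the projection uniform is assumed): the single vec4 attribute carries the position in .xy and the texture coordinates in .zw.
// Vertex shader (sketch): unpack position and texture coordinates
attribute vec4 coord;          // x, y, s, t
varying vec2 texCoord;
uniform mat4 projection;       // assumed transform to clip space
void main() {
    gl_Position = projection * vec4(coord.xy, 0.0, 1.0);
    texCoord = coord.zw;
}
// Fragment shader (sketch): the GL_ALPHA texture carries coverage in .a
uniform sampler2D tex;
varying vec2 texCoord;
void main() {
    gl_FragColor = vec4(1.0, 1.0, 1.0, texture2D(tex, texCoord).a);
}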
Result looks like this:
Can anyone spot a problem in my code?
Two issues with your code.
First one is a buffer overflow: the texture coordinates in your box structure are a vec2, yet you tell glVertexAttribPointer it is a vec4 (the stride of 4*sizeof(float) is what matters, and the mismatched size parameter makes OpenGL read out of bounds, 2 elements past the end of the box array).
The pixelated look stems from the fact that texture coordinates 0 and 1 land on the edges of the texture, not on pixel centers. Either use texelFetch in the fragment shader to address texels by their integer pixel coordinate, or remap the texture extents to the range [0…1] properly, as explained in https://stackoverflow.com/a/5879551/524368
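For the texelFetch route, the fragment shader addresses texels by integer pixel coordinate, bypassing filtering entirely. A minimal sketch, assuming a hypothetical glyphOrigin uniform that holds the quad's lower-left corner in window coordinates (depending on how the bitmap was uploaded, the y index may need flipping):
#version 330 core
uniform sampler2D tex;
uniform ivec2 glyphOrigin; // hypothetical: window-space origin of the glyph quad
out vec4 fragColor;
void main() {
    // window position minus the quad's origin yields the texel index
    ivec2 texel = ivec2(gl_FragCoord.xy) - glyphOrigin;
    fragColor = texelFetch(tex, texel, 0);
}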
I think that to get transparent, smooth, anti-aliased glyphs, you must enable blending in OpenGL, and you may also need to disable depth testing (otherwise previously drawn quads can occlude overlapping ones).
Something like this:
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_BLEND);
//and if it didn't work, then disable depth testing by uncommenting this:
//glDisable(GL_DEPTH_TEST);
hope it helps!
So I was trying to learn OpenGL from learnopengl.com, and I'm currently on the topic of text rendering. I thought about combining the scalable font textures in my program into one big texture, but for some reason I get an exception because of glCopyImageSubData(...).
First I measure how big the combined texture should be, and then I copy the textures I already created into the one big texture. I've been playing with this function for quite a while now, but I can't find a solution.
Here is the original code, with a separate texture used for each face.
I tried to create an FBO and attach a texture to it, but after some research I found this function, which was far clearer to me, so I decided to use it instead.
So I added xoffset to Character structure:
struct Character {
    GLuint textureID;
    glm::ivec2 size;
    glm::ivec2 bearing;
    GLuint advance;
    GLuint xoffset;
};
I add this offset to each face in the first for loop:
Character character = {
    texture,
    glm::ivec2(face->glyph->bitmap.width, face->glyph->bitmap.rows),
    glm::ivec2(face->glyph->bitmap_left, face->glyph->bitmap_top),
    face->glyph->advance.x,
    font_texture_width
};
font_texture_width += character.size.x;
characters.insert(std::pair<GLchar, Character>(c, character));
And then I try to paste each face texture to fontTexture:
glGenTextures(1, &fontTexture);
glBindTexture(GL_TEXTURE_2D, fontTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RED,
    font_texture_width, 100, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
std::cout << glGetError();
for (GLubyte c = 0; c < 128; ++c)
    glCopyImageSubData(characters[c].textureID, GL_TEXTURE_2D, 0, 0, 0, 0,
        fontTexture, GL_TEXTURE_2D, 0, characters[c].xoffset, 0, 0,
        characters[c].size.x, characters[c].size.y, 0);
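Two things worth noting about this call (observations, not necessarily the cause of the crash): glCopyImageSubData only exists in OpenGL 4.3 and later (or via ARB_copy_image), so calling it through a null function pointer on an older context crashes exactly like this; and the last three parameters are the copied region's width, height, and depth, so for 2D textures the depth must be 1, not 0. The expected call shape would be:
// srcX/Y/Z and dstX/Y/Z are texel offsets; the final three parameters are
// the region's width, height, and depth (one layer for GL_TEXTURE_2D).
glCopyImageSubData(characters[c].textureID, GL_TEXTURE_2D, 0, 0, 0, 0,
                   fontTexture, GL_TEXTURE_2D, 0, characters[c].xoffset, 0, 0,
                   characters[c].size.x, characters[c].size.y, 1);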
Here is the modified renderText function:
void renderText(Shader &s, std::string text, GLfloat x, GLfloat y, GLfloat scale, glm::vec3 color)
{
    s.use();
    s.setVec3("textColor", color);
    glActiveTexture(GL_TEXTURE0);
    glBindVertexArray(VAO);
    std::string::const_iterator c;
    glBindTexture(GL_TEXTURE_2D, fontTexture);
    for (c = text.begin(); c != text.end(); ++c)
    {
        Character ch = characters[*c];
        GLfloat xpos = x + ch.bearing.x * scale;
        GLfloat ypos = y - (ch.size.y - ch.bearing.y) * scale;
        GLfloat w = ch.size.x * scale;
        GLfloat h = ch.size.y * scale;
        GLfloat vertices[6][4] = {
            { xpos + characters[*c].xoffset,     ypos + h, 0.0, 0.0 },
            { xpos + characters[*c].xoffset,     ypos,     0.0, 1.0 },
            { xpos + w + characters[*c].xoffset, ypos,     1.0, 1.0 },
            { xpos + characters[*c].xoffset,     ypos + h, 0.0, 0.0 },
            { xpos + w + characters[*c].xoffset, ypos,     1.0, 1.0 },
            { xpos + w + characters[*c].xoffset, ypos + h, 1.0, 0.0 }
        };
        glBindBuffer(GL_ARRAY_BUFFER, VBO);
        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(vertices), vertices);
        glBindBuffer(GL_ARRAY_BUFFER, 0);
        glDrawArrays(GL_TRIANGLES, 0, 6);
        x += (ch.advance >> 6) * scale;
    }
    glBindVertexArray(0);
    glBindTexture(GL_TEXTURE_2D, 0);
}
Exception is being thrown where I call glCopyImageSubData.
Here is my whole program: https://pastebin.com/vAeeX3Xh
EDIT:
Now I've figured out that it would be better to use glTexSubImage2D instead, so this is how it looks (this replaces the block of code that used glCopyImageSubData):
glGenTextures(1, &fontTexture);
glBindTexture(GL_TEXTURE_2D, fontTexture);
glTexImage2D(
GL_TEXTURE_2D,
0,
GL_RED,
face->glyph->bitmap.width * 250,
face->glyph->bitmap.rows * 250,
0,
GL_RED,
GL_UNSIGNED_BYTE,
NULL
);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
for (GLubyte c = 0; c < 128; ++c)
{
    if (FT_Load_Char(face, c, FT_LOAD_RENDER))
    {
        std::cout << "ERROR::FREETYPE: Failed to load Glyph" << std::endl;
        continue;
    }
    glTexSubImage2D(GL_TEXTURE_2D, 0, characters[c].xoffset, 0, characters[c].size.x, characters[c].size.y, GL_RED, GL_UNSIGNED_BYTE, face->glyph->bitmap.buffer);
}
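One classic gotcha with GL_RED uploads like this (an assumption on my part, since the rest of the code lives on pastebin): FreeType packs glyph rows at one byte per pixel, while OpenGL's default unpack alignment is 4 bytes, so any glyph whose width is not a multiple of 4 uploads sheared unless the alignment is lowered first:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1); // glyph rows are tightly packed, not 4-byte aligned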
Now when I render this texture it looks like this:
https://imgur.com/a/A0gDy6T
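For reference, once every glyph sits side by side in one atlas, the quad's texture coordinates must select that glyph's sub-rectangle; xoffset is a texel offset into the atlas, not a screen-space offset. A minimal sketch, assuming the atlas dimensions used above (font_texture_width wide, 100 texels tall):
// Hypothetical per-glyph UVs: convert texel offsets into [0, 1] coordinates.
GLfloat u0 = (GLfloat)ch.xoffset / font_texture_width;
GLfloat u1 = (GLfloat)(ch.xoffset + ch.size.x) / font_texture_width;
GLfloat v1 = (GLfloat)ch.size.y / 100.0f;  // 100 = atlas height passed to glTexImage2D
// The quad stays at xpos/ypos in screen space; only the UVs change:
GLfloat vertices[6][4] = {
    { xpos,     ypos + h, u0, 0.0f },
    { xpos,     ypos,     u0, v1   },
    { xpos + w, ypos,     u1, v1   },
    { xpos,     ypos + h, u0, 0.0f },
    { xpos + w, ypos,     u1, v1   },
    { xpos + w, ypos + h, u1, 0.0f }
};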
I'm learning OpenGL from the MakingGamesWithBen series, and I'm writing a simple asteroid shooter based on his engine. I have created a system that randomly positions and sizes the asteroid sprites, selects a random texture path from an std::vector, and passes the path to the asteroid constructor. The sprites are drawn, but they all use the first texture. I've read that I need to bind those textures and switch to the relevant glActiveTexture; given my code below, how would I go about this?
void MainGame::prepareTextures() {
    //compile shaders and get Texlocations
    initShaders("Shaders/background.vert", "Shaders/background.frag");
    GLint TexLoc = _colorProgram.getUniformLocation("bgTexture");
    glActiveTexture(GL_TEXTURE0);
}
m_asteroid[i].draw():
glm::vec4 uv(0.0f, 0.0f, 1.0f, 1.0f);
//convert m_imgNum to string and remove trailing zeros
std::string strImgNum = std::to_string(m_imgNum);
strImgNum.erase(strImgNum.find_last_not_of('0') + 1, std::string::npos);
//construct filpath
std::string filePath = m_dir + strImgNum + ".png";
static Engine::GLTexture texture = Engine::ResourceManager::GetTexture(filePath, 0, 0, 32, 4);
Engine::Color color;
color.r = 255;
color.g = 255;
color.b = 255;
color.a = 255;
glm::vec4 posAndSize = glm::vec4(m_posX, m_posY, m_width, m_height);
spriteBatch.Draw(posAndSize, uv, texture.id, 0.0f, color);
Engine::ResourceManager::GetTexture():
GLTexture texture = {};
unsigned char *imageData = stbi_load(filePath.c_str(), &width, &height, &bitsPerPixel, forceBpp);
if (imageData == NULL) {
    const char *loadError = stbi_failure_reason();
    stbi_image_free(imageData);
    fatalError(loadError);
}
//Create the texture in opengl
glGenTextures(1, &(texture.id));
glBindTexture(GL_TEXTURE_2D, texture.id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, imageData);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
glGenerateMipmap(GL_TEXTURE_2D);
stbi_image_free(imageData);
glBindTexture(GL_TEXTURE_2D, 0);
texture.width = width;
texture.height = height;
return texture;
renderbatch():
void SpriteBatch::renderbatch() {
    glBindVertexArray(m_vao);
    for (unsigned int i = 0; i < m_renderBatches.size(); i++) {
        glBindTexture(GL_TEXTURE_2D, m_renderBatches[i].texture);
        glDrawArrays(GL_TRIANGLES, m_renderBatches[i].offset, m_renderBatches[i].numVertices);
    }
    glBindVertexArray(0);
}
I can provide any other code/clarification that may be needed!
Usually when not all textures show up, it probably has something to do with how the textures were uploaded to OpenGL. You could try to debug this by checking whether your textures are all uploaded properly:
std::uint32_t textureId = Engine::ResourceManager::GetTexture(filePath, 0, 0, 32, 4).id;
std::cout << "texture id, should not be 0 :" << textureId << std::endl;
This could happen if you called this function from a thread without an OpenGL context.
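To extend that check, you could also poll glGetError right after the upload; a sketch (where exactly to place it depends on your code):
// Immediately after glTexImage2D/glGenerateMipmap in GetTexture:
// any error reported here points at the upload, not the draw code.
GLenum err;
while ((err = glGetError()) != GL_NO_ERROR) {
    std::cout << "GL error during texture upload: 0x" << std::hex << err << std::endl;
}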
EDIT:
Is there any reason to use a static object here? A function-local static is initialized only the first time the function runs, so every asteroid ends up holding whichever texture was loaded first.
static Engine::GLTexture texture = Engine::ResourceManager::GetTexture(filePath, 0, 0, 32, 4);
try changing that to just
Engine::GLTexture texture = Engine::ResourceManager::GetTexture(filePath, 0, 0, 32, 4);
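To illustrate why the static is suspect, here is a self-contained toy (not the project's code) showing that a function-local static runs its initializer only once, so later calls silently reuse the first result:
#include <iostream>
#include <string>

// Stand-in for Engine::ResourceManager::GetTexture (hypothetical).
int loadTexture(const std::string& path) {
    static int nextId = 0;
    std::cout << "loading " << path << "\n";
    return ++nextId;
}

void draw(const std::string& path) {
    static int texture = loadTexture(path); // runs once, for the first path only
    std::cout << path << " -> id " << texture << "\n";
}

int main() {
    draw("asteroid1.png"); // loads and uses id 1
    draw("asteroid2.png"); // still id 1: the static was already initialized
}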
UPDATE:
I just replaced your spritebatch.cpp/.h with the ones from Ben's github and put a simple test in your MainGame.cpp:
m_spriteBatch.begin();
asteroids.draw(m_spriteBatch);
ship.draw(m_spriteBatch);
m_spriteBatch.end();
m_spriteBatch.renderBatch();
_colorProgram.unuse();
_window.SwapBuffers();
and I can render the two uploaded textures properly:
HTH.
I'm trying to draw a textured plane following the OpenGL SuperBible, 6th ed., but for some reason I'm failing.
Here's my texture initialization code.
GLuint texture;
glGenTextures(1, &texture);
glActiveTexture(GL_TEXTURE0);
int w = 256;
int h = 256;
glBindTexture(GL_TEXTURE_2D, texture);
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, w, h);
float * data = new float[w * h * 4];
//This just creates some image data
generateTexture(data, w, h);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_FLOAT, data);
delete [] data;
This is the plane object. The object itself is drawn, just untextured.
glGenBuffers(1, &planeBuffer);
glBindBuffer(GL_ARRAY_BUFFER, planeBuffer);
glBufferData(GL_ARRAY_BUFFER,
    sizeof(planePositions),
    planePositions,
    GL_STATIC_DRAW);
These are my vertex and fragment shaders.
#version 430 core
layout (location = 0) in vec3 position;
uniform mat4 proj, view;
void main(void) {
    gl_Position = proj * view * vec4(position, 1.0);
}

#version 430 core
uniform sampler2D s;
out vec4 frag_color;
void main() {
    frag_color = texelFetch(s, ivec2(gl_FragCoord.xy), 0);
}
I draw like this:
glUseProgram(textureProgram);
GLuint projLocation = glGetUniformLocation (textureProgram, "proj");
glUniformMatrix4fv (projLocation, 1, GL_FALSE, projectionSource);
GLuint viewLocation = glGetUniformLocation (textureProgram, "view");
glUniformMatrix4fv (viewLocation, 1, GL_FALSE, viewSource);
glBindBuffer(GL_ARRAY_BUFFER, planeBuffer);
GLuint positionLocation = glGetAttribLocation(textureProgram, "position");
glVertexAttribPointer (positionLocation, 3, GL_FLOAT, GL_FALSE, 0, NULL);
glEnableVertexAttribArray (positionLocation);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);
GLuint ts = glGetUniformLocation (textureProgram, "s");
glUniform1i(ts, 0);
glDrawArrays(GL_TRIANGLES, 0, 6);
glDisableVertexAttribArray (positionLocation);
//Afterwards I draw more geometry with other shaders. This shows correctly
glUseProgram(shaderProgram);
//Bind buffers, matrices, drawarrays, etc
But I just get a black untextured plane. If I override the frag_color assignment by adding another line afterwards, like so
frag_color = vec4(1.0);
it works, i.e. I get a white plane, so the shaders seem to be working correctly.
I don't get any errors whatsoever from glGetError().
Compatibility:
OpenGL version supported: 4.2.12337 Compatibility Profile Context 13.101
GLSL version supported: 4.30
The data array does contain values between 0 and 1. I have also tried hard-coding some random coordinates into the texelFetch() function, but I always get a black plane. It looks as though the sampler2D contained nothing but zeroes. I have also tried hard-coding the values contained in data to 1.0 and 255.0, but nothing changed.
Either the book fails to mention something or I am missing something stupid. Why does the texture refuse to show on the plane?
EDIT: I added some code to the drawing part. The rest of the geometry that I draw (with different shaders) shows perfectly. None of it uses textures.
I finally found a way around this, although it is not entirely clear what the problem was.
I got the texture to show using glTexImage2D instead of glTexStorage2D and glTexSubImage2D.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, w, h, 0, GL_RGBA, GL_FLOAT, data);
I also had to set the filtering parameters explicitly, even though I'm using texelFetch() (presumably because the default minification filter, GL_NEAREST_MIPMAP_LINEAR, leaves a single-level texture mipmap-incomplete, and an incomplete texture samples as black).
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Binding the uniform explicitly was not necessary.
What is strange is that, according to the docs, glTexStorage2D is equivalent to this:
for (i = 0; i < levels; i++) {
    glTexImage2D(target, i, internalformat, width, height, 0, format, type, NULL);
    width = max(1, (width / 2));
    height = max(1, (height / 2));
}
However, this combination does not work.
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA, w, h);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_FLOAT, data);
I still have to figure out exactly why, but at least I got it to work. (A likely explanation: glTexStorage2D accepts only sized internal formats, so the unsized GL_RGBA in the combination above fails with GL_INVALID_ENUM and the texture never receives storage.)
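For what it's worth, a combination that should work (a sketch, assuming the completeness issue mentioned above is also handled) is glTexStorage2D with a sized internal format, filled by glTexSubImage2D:
// glTexStorage2D accepts only *sized* internal formats; the unsized GL_RGBA
// is rejected with GL_INVALID_ENUM, while GL_RGBA32F (or GL_RGBA8) is valid.
glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, w, h);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, w, h, GL_RGBA, GL_FLOAT, data);
// With a single level, keep the minification filter away from mipmaps,
// or the texture is incomplete and samples as black:
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);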
I'm just testing this stuff out, so I don't need an alternate approach (no GL extensions). Just hoping someone sees an obvious mistake in my usage of GLES.
I want to take a bitmap of a glyph that is smaller than 32x32 (the width and height are not necessarily powers of 2) and put it into a texture so I can render it. I first create an empty 32x32 texture, then copy the pixels into the larger texture.
GLuint texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, GL_ALPHA, 32, 32, 0,
    GL_ALPHA, GL_UNSIGNED_BYTE, NULL);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, bitmap.width(), bitmap.height(),
    GL_ALPHA, GL_UNSIGNED_BYTE, bitmap.pixels());
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Then I try to draw only the bitmap portion of the texture using the texture coordinates:
const GLfloat vertices[] = {
    x + bitmap.width(), y + bitmap.height(),
    x,                  y + bitmap.height(),
    x,                  y,
    x + bitmap.width(), y
};
const GLfloat texCoords[] = {
    0,                   bitmap.height() / 32,
    bitmap.width() / 32, bitmap.height() / 32,
    0,                   0,
    bitmap.width() / 32, 0
};
const GLushort indices[] = { 0, 1, 2, 0, 2, 3 };
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_TEXTURE_COORD_ARRAY);
glVertexPointer(2, GL_FLOAT, 0, vertices);
glTexCoordPointer(2, GL_FLOAT, 0, texCoords);
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, indices);
Now if all were well in the world, the square created by the vertices would be the same size as the bitmap portion of the texture, and it would draw my bitmap exactly.
Let's say, for example, that my glyph was 16x16; then it should take up the bottom-left quadrant of the 32x32 texture. The texCoords would then seem correct: (0, 0.5), (0.5, 0.5), (0, 0) and (0.5, 0).
However my 12x12 'T' glyph looks like this:
Anyone know why?
By the way, I start by setting up the projection matrix for 2D work as such:
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0.0f, 480, 800, 0.0f, 0.0f, 1.0f);
glDisable(GL_DEPTH_TEST);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glTranslatef(0.375f, 0.375f, 0.0f); // for exact pixelization
glEnable(GL_BLEND);
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
glEnable(GL_TEXTURE_2D);
glClearColor(1.0f, 1.0f, 1.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
The mapping between vertex coordinates and texture coordinates seems to be mixed up. Try changing your vertex coordinates to:
const GLfloat vertices[] = {
    x,                  y + bitmap.height(),
    x + bitmap.width(), y + bitmap.height(),
    x,                  y,
    x + bitmap.width(), y
};
As an aside:
I don't think you need to go the route via vertex indices in your case. A call to glDrawArrays would be easier:
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
(as you have already set up your glVertexPointer and glTexCoordPointer).
This should be really simple, but it has consumed many hours of my time, and I have no clue what's going on.
I'm rendering a flat-colored full-screen quad to a texture, then reading back the result with glGetTexImage. It's GPGPU related, so I want the alpha channel to behave like the other three. I'm using an FBO, texture format GL_RGBA32F_ARB, and an NVIDIA card on a MacBook Pro with 10.5, if it matters.
I only get back the correct color if the alpha I specify is one; with any other value it appears to blend with what's already in the framebuffer, even though I've explicitly disabled GL_BLEND. I also tried enabling blending with glBlendFunc(GL_ONE, GL_ZERO), but the end result is the same. I can clear the framebuffer to zero before rendering, which fixes it, but I want to understand why that's necessary.
As a second test, rendering two overlapping quads gives a blended result, when I just want the original 4-channel color back. Surely the solid-color quad should completely overwrite the pixels in the framebuffer? I'm guessing I've misunderstood something fundamental. Thanks.
const size_t res = 16;
GLuint tex;
glGenTextures(1, &tex);
glBindTexture(GL_TEXTURE_2D, tex);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_GENERATE_MIPMAP, GL_FALSE);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F_ARB,
res, res, 0, GL_RGBA, GL_FLOAT, 0);
glBindTexture(GL_TEXTURE_2D, 0);
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT,
GL_COLOR_ATTACHMENT0_EXT, GL_TEXTURE_2D, tex, 0);
glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
glViewport(0, 0, res, res);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(0, res, 0, res, -1, 1);
glClearColor(0,0,0,0);
glClear(GL_COLOR_BUFFER_BIT);
//glEnable(GL_BLEND);
//glBlendFunc(GL_ONE, GL_ZERO);
glDisable(GL_BLEND);
glDisable(GL_DEPTH_TEST);
glColor4f(0.2, 0.3, 0.4, 0.5);
for (int i = 0; i < 2; ++i) {
    glBegin(GL_QUADS);
    glVertex2i(0, 0);
    glVertex2i(res, 0);
    glVertex2i(res, res);
    glVertex2i(0, res);
    glEnd();
}
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, 0);
std::vector<float> tmp(res*res*4);
glBindTexture(GL_TEXTURE_2D, tex);
glGetTexImage(GL_TEXTURE_2D, 0,
GL_RGBA, GL_FLOAT, &tmp.front());
const float * const x = &tmp.front();
cerr << x[0] << " " << x[1] << " " << x[2] << " " << x[3] << endl;
// prints 0.3 0.45 0.6 0.75
glDeleteTextures(1, &tex);
glDeleteFramebuffersEXT(1, &fbo);
Not really a full answer; however, some things to note:
What you're observing does not really look like blending. For one, your back buffer initially holds rgba = 0, so alpha-blending against it would pull the result toward 0, not produce the values you observe.
My inclination was that you had somehow bound the same texture as both a texture source and a framebuffer attachment. That is undefined by the spec (section 4.4.3). In the code snippet you provide you do call glBindTexture(GL_TEXTURE_2D, 0), which should make sure that is not the case... I'll leave it here in case you've missed it.
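For what it's worth, a quick way to distinguish "blending" from "stale framebuffer contents" is to clear the attachment to a sentinel value, draw a single quad with blending disabled, and read back. A sketch along the lines of the code above (drawQuad is a hypothetical helper wrapping the glBegin/glEnd block):
// If the readback returns exactly the quad color, nothing blended;
// if the sentinel leaks through, the old framebuffer contents were involved.
glDisable(GL_BLEND);
glClearColor(9.0f, 9.0f, 9.0f, 9.0f); // sentinel value, representable in GL_RGBA32F
glClear(GL_COLOR_BUFFER_BIT);
glColor4f(0.2f, 0.3f, 0.4f, 0.5f);
drawQuad();                            // hypothetical: one GL_QUADS quad, as above
// ... glGetTexImage as before; expect exactly 0.2 0.3 0.4 0.5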