OpenGL texture is all black when rendered with shader - c++

I have a very simple OpenGL application that renders only one textured quad. This is my code, which works just fine (the textured quad appears as expected):
// Bind the test texture
glBindTexture(GL_TEXTURE_2D, mTestTexture);
// Draw the quad
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex3f(x, y + (float)height, 0.0f);
glTexCoord2f(1.0f, 0.0f);
glVertex3f(x + (float)width, y + (float)height, 0.0f);
glTexCoord2f(1.0f, 1.0f);
glVertex3f(x + (float)width, y, 0.0f);
glTexCoord2f(0.0f, 1.0f);
glVertex3f(x, y, 0.0f);
glEnd();
Then I wanted to introduce a simple shader, so I modified my code a little:
// Use shader and point it to the right texture
auto texLocation = glGetUniformLocation(mProgram, "tex");
glUseProgram(mProgram);
glUniform1i(texLocation, mTestTexture);
// Draw the quad
// Same drawing code as before...
Vertex shader:
void main(void)
{
gl_Position = ftransform();
gl_TexCoord[0] = gl_MultiTexCoord0;
}
Fragment shader:
uniform sampler2D tex;
void main()
{
vec4 color = texture2D(tex, gl_TexCoord[0].st);
gl_FragColor = color;
}
Now all I get is a black quad :-(
I already tried and tested a lot of things:
The shaders compile fine (no errors)
The quad is visible (vertex shader seems OK)
If I change the shader to produce a fixed color ("gl_FragColor = vec4(1,0,0,1);") my quad becomes red -> fragment shader is doing something!
glGetError() does not return any errors
My texLocation, mProgram and mTestTexture all seem to be valid IDs
Does anyone have an idea why I won't see my texture when using the shader?

glUniform1i(texLocation, mTestTexture);
                         ^^^^^^^^^^^^ texture object
Texture unit indexes are bound to samplers, not texture objects.
Use texture unit zero instead:
glUniform1i(texLocation, 0);
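Putting it together, a minimal sketch of the corrected setup (explicitly selecting texture unit 0, which is also the default):
// Bind the texture to texture unit 0
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, mTestTexture);
// Point the sampler at texture unit 0, not at the texture object
glUseProgram(mProgram);
glUniform1i(texLocation, 0);
// Draw the quad as before...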

Related

OpenGL spritesheet shader

I'm starting to understand that a vertex shader handles transformations of my texture, while the fragment shader handles individual pixels. But this vector math is confusing.
What I'm trying to do is render a sprite from a sprite sheet. I can render a whole image just fine, but now I'm actually trying to write my own shader.
I think it's more efficient to have the graphics card do the heavy lifting. That being said:
Currently I draw whole images like so:
In my init step,
void TextureRenderer::initRenderData()
{
// Configure VAO/VBO
game_uint VBO;
game_float vertices[] = {
// Pos // Tex
0.0f, 1.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 0.0f,
0.0f, 1.0f, 0.0f, 1.0f,
1.0f, 1.0f, 1.0f, 1.0f,
1.0f, 0.0f, 1.0f, 0.0f
};
glGenVertexArrays(1, &this->quadVAO);
glGenBuffers(1, &VBO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindVertexArray(this->quadVAO);
glEnableVertexAttribArray(0);
glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 4 * sizeof(game_float), (GLvoid*)0);
glBindBuffer(GL_ARRAY_BUFFER, 0);
glBindVertexArray(0);
}
Then, when it's time to draw any texture:
void TextureRenderer::DrawTexture(Texture2D &texture, vec2 position, vec2 size, game_float rotate, vec3 color)
{
// Prepare transformations
this->shader->Use();
glm::mat4 model;
model = glm::translate(model, vec3(position, 0.0f)); // First translate (transformations are: scale happens first, then rotation, and then finally translation; reversed order)
model = glm::translate(model, vec3(0.5f * size.x, 0.5f * size.y, 0.0f)); // Move origin of rotation to center of quad
model = glm::rotate(model, rotate, vec3(0.0f, 0.0f, 1.0f)); // Then rotate
model = glm::translate(model, vec3(-0.5f * size.x, -0.5f * size.y, 0.0f)); // Move origin back
model = glm::scale(model, vec3(size, 1.0f)); // Last scale
this->shader->SetMatrix4("model", model);
// Render textured quad
this->shader->SetVector3f("spriteColor", color);
glActiveTexture(GL_TEXTURE0);
texture.Bind();
glBindVertexArray(this->quadVAO);
glDrawArrays(GL_TRIANGLES, 0, 6);
glBindVertexArray(0);
}
TextureShader.vs:
#version 330 core
layout (location = 0) in vec4 vertex; // <vec2 position, vec2 texCoords>
out vec2 TexCoords;
uniform mat4 model;
uniform mat4 projection;
void main()
{
TexCoords = vertex.zw;
gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
Fragment Shader:
#version 330 core
in vec2 TexCoords; // Texture coordinates
out vec4 color;
uniform sampler2D image;
uniform vec3 spriteColor;
void main()
{
color = vec4(spriteColor, 1.0) * texture(image, TexCoords);
}
Now that all works fine and dandy (assuming proper OpenGL setup, etc.).
But I'd like to apply this to a sprite sheet shader and have the GPU handle the math to draw it.
void SpriteRenderer::drawSprite(Texture2D &texture, vec2 position, game_float spriteHeight, game_float spriteWidth, int frameIndex)
{
shader->Use(); // Load a different shader here.
shader->SetInteger("frameindex", frameIndex);
shader->SetVector2f("position", position);
shader->SetFloat("spriteHeight", spriteHeight);
shader->SetFloat("spriteWidth", spriteWidth);
shader->SetMatrix4("model", model);
shader->SetVector3f("spriteColor", color);
glActiveTexture(GL_TEXTURE0); // Select texture unit 0
texture.Bind(); //Bind my texture.
glBindVertexArray(this->quadVAO); //Bind the fullscreen quad
glDrawArrays(GL_TRIANGLES, 0, 6); //Draw
glBindVertexArray(0); //Unbind the quad.
}
I assume:
Inside the vertex shader, I manipulate the VAO quad to the position it is on the canvas, then in the fragment shader set the color of the pixels to that specific region of the sheet.
How would that be done?
Or would I be better off pre-calculating a VAO for each sprite in a sprite class? Then each draw call would be:
void SpriteRenderer::drawSprite(Texture2D &texture, vec2 position, Sprite s)
where the sprite has these vertices stored.
I've seen:
Techniques for drawing spritesheets in OpenGL with shaders
It's somewhat similar, but I'd like to have the GPU handle the math altogether.
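For illustration, here is a minimal sketch of the shader-side approach, assuming the frames sit in a single row and a hypothetical uniform sheetSize carries the sheet's dimensions in pixels (neither assumption comes from the code above). The unit quad's 0..1 texture coordinates are shifted into the cell selected by frameIndex:
#version 330 core
layout (location = 0) in vec4 vertex; // <vec2 position, vec2 texCoords>
out vec2 TexCoords;
uniform mat4 model;
uniform mat4 projection;
uniform int frameIndex;     // index of the frame to draw
uniform float spriteWidth;  // frame width in pixels
uniform float spriteHeight; // frame height in pixels
uniform vec2 sheetSize;     // hypothetical: sheet dimensions in pixels
void main()
{
    // Size of one frame in normalized texture coordinates
    vec2 frameUV = vec2(spriteWidth, spriteHeight) / sheetSize;
    // Shift the quad's texcoords into the frameIndex-th cell (single-row layout assumed)
    TexCoords = vec2(float(frameIndex) * frameUV.x, 0.0) + vertex.zw * frameUV;
    gl_Position = projection * model * vec4(vertex.xy, 0.0, 1.0);
}
The fragment shader can stay exactly as in the whole-image version; all the per-frame math lives in the vertex shader.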

OpenGL Lighting Not Shading

I'm attempting to add basic lighting to my scene, so far even without GLSL. It appears that my lighting doesn't work, and what I mean by that is that everything has no shading at all (everything is fully lit).
I know the picture isn't the best, but that's partly because the texture itself is shaded in the image.
Here is my init code:
glClearColor(0.0f, 0.0f, 0.0f, 1.0f); // Clear to black with full alpha
glEnable(GL_DEPTH_TEST); // Enable depth testing
glDepthFunc(GL_LEQUAL); // Specify depth testing function
glClearDepth(1.0); // Clear the full extent of the depth buffer (default)
glEnable(GL_CULL_FACE); // Enable face culling
glCullFace(GL_BACK); // Cull back faces of polygons
glFrontFace(GL_CCW);
glEnable(GL_TEXTURE_2D);
glEnable(GL_BLEND);
glShadeModel(GL_SMOOTH);
glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);
glTexEnvi (GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);
glColorMaterial ( GL_FRONT_AND_BACK, GL_AMBIENT_AND_DIFFUSE );
glEnable ( GL_COLOR_MATERIAL );
GLfloat ambientLight[] = { 0.2f, 0.2f, 0.2f, 0.2f };
GLfloat diffuseLight[] = { 0.8f, 0.8f, 0.8f, 1.0f };
GLfloat specularLight[] = { 0.5f, 0.5f, 0.5f, 1.0f };
GLfloat position[] = {0, 100.0f, 0, 1.0f };
glLightModelfv(GL_AMBIENT, ambientLight);
glLightModelfv(GL_DIFFUSE, ambientLight);
glLightModelfv(GL_SPECULAR, specularLight);
glLightModelfv(GL_POSITION, position);
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight);
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight);
glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight);
glLightfv(GL_LIGHT0, GL_POSITION, position);
Draw code:
glEnable(GL_COLOR_MATERIAL);
glEnable(GL_LIGHTING);
glEnable(GL_NORMALIZE);
//For loop of the lights...
glLightfv(GL_LIGHT0+m_lights.at(i)->id, GL_POSITION, glm_to_array(m_lights.at(i)->position));
glEnable(GL_LIGHT0+m_lights.at(i)->id);
//Draw geometry here...
And yes, I am retrieving the normals from the mesh file and issuing glNormal3f for every face (GL_TRIANGLES). Yet even the plane the model sits on never gets affected, even if I completely change the normals to random values.
Plane Example:
if(m_shader_programme){
glUseProgram(m_shader_programme);
}
if(m_texture_id){
glBindTexture(GL_TEXTURE_2D, m_texture_id);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
}
glBegin(GL_TRIANGLES);
glNormal3f(0.0f, 1.0f, 0.0f);
glTexCoord2f(0.0f, m_sizey/10);
glVertex3f(m_sizex, m_height, m_sizey);
glTexCoord2f(m_sizex/10, m_sizey/10);
glVertex3f(m_sizex, m_height, -m_sizey);
glTexCoord2f(m_sizex/10, 0.0f);
glVertex3f(-m_sizex, m_height, -m_sizey);
glNormal3f(0.0f, 1.0f, 0.0f);
glTexCoord2f(m_sizex/10, 0.0f);
glVertex3f(-m_sizex, m_height, -m_sizey);
glTexCoord2f(0.0f, 0.0f);
glVertex3f(-m_sizex, m_height, m_sizey);
glTexCoord2f(0.0f, m_sizey/10);
glVertex3f(m_sizex, m_height, m_sizey);
glEnd();
if(m_shader_programme){
glUseProgram(0);
}
if(m_texture_id){
glBindTexture(GL_TEXTURE_2D, 0);
}
GLSL Fragment Shader:
#version 120
uniform sampler2D tex;
void main()
{
vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = color;
}
Vertex Shader:
#version 120
void main() {
vec3 normal, lightDir;
vec4 diffuse, ambient, globalAmbient;
float NdotL;
gl_TexCoord[0] = gl_TextureMatrix[0] * gl_MultiTexCoord0;
normal = normalize(gl_NormalMatrix * gl_Normal);
lightDir = normalize(vec3(gl_LightSource[0].position));
NdotL = max(dot(normal, lightDir), 0.0);
diffuse = gl_FrontMaterial.diffuse * gl_LightSource[0].diffuse;
/* Compute the ambient and globalAmbient terms */
ambient = gl_FrontMaterial.ambient * gl_LightSource[0].ambient;
globalAmbient = gl_LightModel.ambient * gl_FrontMaterial.ambient;
gl_FrontColor = NdotL * diffuse + globalAmbient + ambient;
gl_Position = ftransform();
}
System Specs
OS X (Yosemite) 10.10.13 (64bit)
Renderer: NVIDIA GeForce GTX 780M OpenGL Engine
OpenGL version supported: 2.1 NVIDIA-10.2.7 310.41.25f01
Just in case anyone is wondering why I'm not using OpenGL 3.x: GL 3.x doesn't appear to play nice on my machine.
Your fragment shader:
vec4 color = texture2D(tex,gl_TexCoord[0].st);
gl_FragColor = color;
simply discards all the computations you did in the vertex shader and just samples the texture color. You need to multiply the texture sample by the interpolated color value:
vec4 color = texture2D(tex, gl_TexCoord[0].st) * gl_Color;
                                                 ^~~~~~~~
gl_FragColor = color;
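For reference, the corrected fragment shader in full:
#version 120
uniform sampler2D tex;
void main()
{
    // gl_Color carries the interpolated gl_FrontColor written by the vertex shader
    vec4 color = texture2D(tex, gl_TexCoord[0].st) * gl_Color;
    gl_FragColor = color;
}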

OpenGL unwanted transparency with deferred shading

I'm trying to implement deferred shading in OpenGL; I render positions and normals to textures. When I use only one light source without blending, everything works well, but after I turn on blending so that other light sources can contribute, I get transparency. Has anyone had the same problem, and a solution to it?
Here is my fragment shader for lighting:
#version 330
uniform vec2 ScreenSize;
uniform vec3 AmbientColor;
uniform vec3 DiffuseColor;
uniform vec3 SpecColor;
uniform vec3 LightPosition;
uniform sampler2D PositionMap;
uniform sampler2D NormalMap;
vec2 CalcTexCoord()
{
return gl_FragCoord.xy / ScreenSize;
}
out vec4 FragColor;
void main()
{
vec2 PixelPos = CalcTexCoord();
vec3 WorldPos = texture(PositionMap, PixelPos).xyz;
vec3 Normal = texture(NormalMap, PixelPos).xyz;
vec3 LightDir = normalize(LightPosition - WorldPos);
float Lambertian = max(dot(LightDir, Normal), 0.0);
float Specular = 0.0;
if(Lambertian > 0.0)
{
vec3 ViewDir = normalize(-WorldPos);
// this is blinn phong
vec3 HalfDir = normalize(LightDir + ViewDir);
float SpecAngle = max(dot(HalfDir, Normal), 0.0);
Specular = pow(SpecAngle, 16.0);
vec3 ReflectDir = reflect(-LightDir, Normal);
SpecAngle = max(dot(ReflectDir, ViewDir), 0.0);
// note that the exponent is different here
Specular = pow(SpecAngle, 4.0);
}
FragColor = vec4(AmbientColor+Lambertian*DiffuseColor+Specular*SpecColor, 1.0);
}
The geometry pass:
glDisable(GL_BLEND);
glDepthMask(GL_TRUE);
glEnable(GL_DEPTH_TEST);
glBindFramebuffer(GL_FRAMEBUFFER, Fbo);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glViewport(0, 0, Camera.Viewport.X, Camera.Viewport.Y);
UpdateInput(Window, &Camera);
UpdateMatrices(&Camera, &RenderState);
Meshes[1].ModelMat = Translate(M4(1.0f), LightPos);
UseShader(&RenderState, &GeometryShader);
for(uint8 i = 0; i < ArrayCount(Meshes); ++i)
{
RenderMesh(&GeometryShader, &Meshes[i]);
}
glDepthMask(GL_FALSE);
glDisable(GL_DEPTH_TEST);
The light pass:
glBindFramebuffer(GL_READ_FRAMEBUFFER, Fbo);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0);
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT);
glViewport(0, 0, Camera.Viewport.X, Camera.Viewport.Y);
// Binding Position and Normal maps.
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, Targets[0].Id);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, Targets[2].Id);
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
UseShader(&RenderState, &LightShader);
LoadVec2(&LightShader, "ScreenSize", V2(Camera.Viewport.X, Camera.Viewport.Y));
LoadVec3(&LightShader, "AmbientColor", V3(0.2f, 0.2f, 0.2f));
LoadVec3(&LightShader, "DiffuseColor", V3(0.1f, 0.1f, 0.1f));
LoadVec3(&LightShader, "SpecColor", V3(0.3f, 0.3f, 0.3f));
LoadVec3(&LightShader, "LightPosition", V3(5.0f, 3.0f, 3.0f));
LoadSampler2D(&LightShader, "PositionMap", 0);
LoadSampler2D(&LightShader, "NormalMap", 1);
for(uint8 i = 0; i < ArrayCount(Meshes); ++i)
{
RenderMesh(&LightShader, &Meshes[i]);
}
LoadVec3(&LightShader, "LightPosition", LightPos);
for(uint8 i = 0; i < ArrayCount(Meshes); ++i)
{
RenderMesh(&LightShader, &Meshes[i]);
}
glDisable(GL_BLEND);
The blending that I'm applying:
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);
Textures format:
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32F, Width, Height, 0, GL_RGBA, GL_FLOAT, Pixels);
Depth texture format:
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, Width, Height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, 0);
Position and Normal maps:
http://postimg.org/image/qctx0smj7/
Blending is off:
http://postimg.org/image/7nqlz3sbr/
Blending is on:
http://postimg.org/image/99h7x2p5t/
You have to ensure that a fragment that gets blended into the final FBO is the topmost one; otherwise you add up light in overlapping regions. If you disable blending, the problem is still the same, but you don't see it, since fragments behind the topmost one are simply overwritten.
Conclusion: the speed advantage of deferred shading is completely wasted, since in the second pass you are drawing the same number of fragments that would have been drawn in normal forward rendering.
Solutions
Most engines render the second pass not to the backbuffer but to another FBO, mainly because of post-processing. But this also allows using the same depth buffer in both render passes. In the first pass, depth reads and writes are performed as you already do. In the second pass, depth writes are disabled, but depth testing is still done (with GL_LEQUAL). This discards all fragments that are behind the topmost one. Sometimes it might be necessary to draw the objects in the second pass a little bit nearer to the camera to prevent z-fighting issues.
If you cannot use a second FBO, you can do one of two things: copy the depth buffer of the first pass to the default depth buffer (generally horrible performance), or do the depth testing yourself in the fragment shader.
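A minimal sketch of the state setup for the light pass under the shared-depth-buffer scheme (assuming both passes target FBOs with the same depth attachment):
// Light pass: reuse the depth buffer written by the geometry pass
glDepthMask(GL_FALSE);        // no depth writes
glEnable(GL_DEPTH_TEST);      // but still test against the first pass
glDepthFunc(GL_LEQUAL);       // only the topmost fragments survive
glEnable(GL_BLEND);
glBlendEquation(GL_FUNC_ADD);
glBlendFunc(GL_ONE, GL_ONE);  // accumulate light contributions
// ... draw the light pass ...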

Sending two textures to GLSL shader

When sending two textures to my GLSL shader, only one actually arrives. What is strange is that the first texture I bind is used for both texture slots in my shader. This leads me to believe the way I am passing my textures in OpenGL is wrong. However, I am unable to track down the problem.
Here is the code where I configure the textures for use in my shader.
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo2);
glPushAttrib(GL_VIEWPORT_BIT | GL_ENABLE_BIT);
glClearColor(1.0f, 0.0f, 0.0f, 1.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// Get uniforms
GLuint pass_3O = glGetUniformLocation(blend_shader, "org");
GLuint pass_3B = glGetUniformLocation(blend_shader, "blur");
// Activate shaders
glUseProgram(blend_shader);
// Bind first texture
glActiveTexture(GL_TEXTURE0 );
glBindTexture(GL_TEXTURE_2D, init_texture);
// Bind the second texture
glActiveTexture(GL_TEXTURE1 );
glBindTexture(GL_TEXTURE_2D, third_texture);
// Assign index to 2d images
glUniform1i(pass_3O, 0);
glUniform1f(pass_3B, 1);
The code above passes in two textures. The first is a 2D image of the first rendering pass of the 3D scene. The second is that same texture with two levels of blur added. This final stage is to blend them together for a poor man's bloom.
Here is the code where I am drawing both textures to the quad.
// Draw to quad
glBegin(GL_QUADS);
glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 0.0f);
glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 0.0f);
glVertex3f(-w_width/2.0, -w_height/2.0, 0.5f);
glMultiTexCoord2f(GL_TEXTURE0, 0.0f, 1.0f);
glMultiTexCoord2f(GL_TEXTURE1, 0.0f, 1.0f);
glVertex3f(-w_width/2.0, w_height/2.0, 0.5f);
glMultiTexCoord2f(GL_TEXTURE0, 1.0f, 1.0f);
glMultiTexCoord2f(GL_TEXTURE1, 1.0f, 1.0f);
glVertex3f(w_width/2.0, w_height/2.0, 0.5f);
glMultiTexCoord2f(GL_TEXTURE0, 1.0f, 0.0f);
glMultiTexCoord2f(GL_TEXTURE1, 1.0f, 0.0f);
glVertex3f(w_width/2.0, -w_height/2.0, 0.5f);
glEnd();
glFlush();
glPopAttrib();
// Unbind textures
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, 0);
// Disable blend shader
glUseProgram(0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
And here is the shader I am using to render the final image.
Vert
#version 120
void main()
{
gl_TexCoord[0] = gl_MultiTexCoord0;
gl_TexCoord[1] = gl_MultiTexCoord1;
gl_Position = ftransform();
}
Frag
#version 120
uniform sampler2D org;
uniform sampler2D blur;
void main()
{
vec4 orig_clr = texture2D( org, gl_TexCoord[0].st);
vec4 blur_clr = texture2D( blur, gl_TexCoord[1].st );
//gl_FragColor = orig_clr;
gl_FragColor = blur_clr;
}
If I switch between the last two lines in the fragment shader, I get the exact same results. The only way to change which texture gets rendered is to change the order in which I bind them.
For example, the following would finally pass me the blurred image. Once again, I'm only getting one of the two images.
glActiveTexture(GL_TEXTURE0 );
glBindTexture(GL_TEXTURE_2D, third_texture);
glActiveTexture(GL_TEXTURE1 );
glBindTexture(GL_TEXTURE_2D, init_texture);
Any thoughts on what I am overlooking?
Look at this code:
glUniform1i(pass_3O, 0);
glUniform1f(pass_3B, 1);
You have a small typo here: it should be glUniform1i instead of glUniform1f in the second call. The type must match that of the shader variable, so this call should just result in an error, leaving the uniform initialized at 0, which completely explains your results.
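With the type corrected, both samplers receive their intended units:
glUniform1i(pass_3O, 0); // "org"  reads from texture unit 0
glUniform1i(pass_3B, 1); // "blur" reads from texture unit 1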

GLSL: unable to read texture from a FBO and render to another FBO with fragment shader

I'm trying to "read" the texture attached to a first FBO (fboA), modifying it (with fragment shader) and render to a second FBO (fboB).
I'm not able to figure it out, all I got is a black or white texture.
Here is the code:
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fboB);
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, textureFromFboA);
GLuint t1Location = glGetUniformLocation(shaderProgram, "texture");
glUniform1i(t1Location, 0);
glUseProgram(shaderProgram);
glBegin(GL_QUADS);
glTexCoord2f(0.0f, 0.0f);
glVertex2f (0.0f, 0.0f);
glTexCoord2f(imageRect.size.width, 0.0f);
glVertex2f (imageRect.size.width, 0.0f);
glTexCoord2f(imageRect.size.width, imageRect.size.height);
glVertex2f (imageRect.size.width, imageRect.size.height);
glTexCoord2f(0.0f, imageRect.size.height);
glVertex2f (0.0f,imageRect.size.height);
glEnd();
glDisable(GL_TEXTURE_RECTANGLE_ARB);
glFlush();
glUseProgram(0);
This is the fragment shader code:
#version 120
uniform sampler2D texture;
void main(void)
{
gl_FragColor = texture2D(texture, gl_TexCoord[0].st) * 0.8;
}
I would expect to get a darker texture rendered in fboB, but I only get an all-black texture. This also happens if I write gl_FragColor = texture2D(texture, gl_TexCoord[0].st);.
On the contrary, if I write gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0); I correctly get an all-red texture as output.
If I comment out the glUseProgram() call, the code works fine and the texture in fboB is an exact copy of the texture in fboA.
Why this happens? Am I missing something?
uniform sampler2D texture;
Rectangle textures are not the same texture type as 2D textures. Yes, they're two-dimensional, but they still maintain a distinct texture type. Therefore, they cannot be accessed via a sampler2D.
So change that to a sampler2DRect. You will also need to use proper texture coordinates, because rectangle textures take texel-space coordinates instead of normalized coordinates.
Alternatively, you can just use a 2D texture. NPOT textures have been around for well over half a decade; you don't have to use rectangle textures to have non-power-of-two render targets.
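A minimal sketch of the rectangle-texture version of the fragment shader (assuming the GL_ARB_texture_rectangle extension; note that the glTexCoord2f calls in the drawing code above already pass texel-space coordinates):
#version 120
#extension GL_ARB_texture_rectangle : enable
uniform sampler2DRect texture;
void main(void)
{
    // texture2DRect takes texel-space coordinates (0..width, 0..height)
    gl_FragColor = texture2DRect(texture, gl_TexCoord[0].st) * 0.8;
}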
You cannot use sampler2D for texture rectangles... you must use GL_TEXTURE_2D, not GL_TEXTURE_RECTANGLE_ARB.
Here is a nice tutorial on FBOs:
http://www.songho.ca/opengl/gl_fbo.html