Rendering to cube map - OpenGL

According to ARB_geometry_shader4 it is possible to render a scene onto the 6 faces of a cube map with a geometry shader and the cube map attached to a framebuffer object. I want to create a shadow map this way. However, there seems to be a conflict that I can't resolve:
I can only attach a texture with GL_DEPTH_COMPONENT as internal type to the GL_DEPTH_ATTACHMENT_EXT.
A depth texture can only be 1D or 2D.
If I want to attach a cube map, all other attached textures must be cube maps as well.
So it looks like I can't use any depth testing when I want to render to a cube map. Or what exactly am I missing here?
EDIT: It looks like newer Nvidia drivers (180.48) support depth cube maps.

Ok, to answer some other questions here:
Of course it is possible to use 6 FBOs, one for each face. Or to use one FBO and attach each face before you draw to it. In both cases the cube map face will be treated like any other 2D texture and you can use it together with normal 2D textures or Renderbuffers. And there's probably not much of a difference in all the possible ways (if the hardware supports them).
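For reference, here is a minimal C++ sketch of the one-FBO, attach-each-face variant; cubeColorTex, width, height and drawSceneForFace are placeholders for your own cube map texture, its size and your render function:
// One FBO; re-attach the next cube map face before drawing to it.
GLuint fbo;
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);

// A plain 2D depth renderbuffer works here, because each face is attached
// as an ordinary 2D image.
GLuint depthRbo;
glGenRenderbuffersEXT(1, &depthRbo);
glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthRbo);
glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24, width, height);
glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
                             GL_RENDERBUFFER_EXT, depthRbo);

for (int face = 0; face < 6; face++) {
    glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT,
                              GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, cubeColorTex, 0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    drawSceneForFace(face); // set the view matrix for this face, then draw
}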
However, it's also possible to draw everything in one step and as I was curious as to how this is done I did some research.
To create a FBO with all faces of a cube map attached to a single attachment point I used this code (written in D):
// depth cube map
glGenTextures(1, &tDepthCubeMap);
glBindTexture(GL_TEXTURE_CUBE_MAP, tDepthCubeMap);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (uint face = 0; face < 6; face++) {
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_DEPTH_COMPONENT24,
                 width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, null);
}
// color cube map
glGenTextures(1, &tColorCubeMap);
glBindTexture(GL_TEXTURE_CUBE_MAP, tColorCubeMap);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameterf(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
for (uint face = 0; face < 6; face++) {
    glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + face, 0, GL_RGBA,
                 width, height, 0, GL_RGBA, GL_FLOAT, null);
}
// framebuffer object
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
glFramebufferTextureARB(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, tDepthCubeMap, 0);
glFramebufferTextureARB(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0_EXT, tColorCubeMap, 0);
glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
if (!isValidFBO()) {
    glDeleteFramebuffersEXT(1, &fbo);
    fbo = 0;
}
If you want to have only a depth map, you have to change glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT); to glDrawBuffer(GL_NONE); before validating the FBO (and before drawing to it); a depth-only sketch follows these notes.
The MIN and MAG filters must be set to something that does not require mipmaps (the default MIN filter is GL_NEAREST_MIPMAP_LINEAR, which leaves the texture incomplete if no mipmap levels are uploaded).
The width and height of all attached textures must be the same.
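For the depth-only (shadow map) case, a minimal sketch of the FBO setup, reusing tDepthCubeMap and isValidFBO from above; glReadBuffer(GL_NONE) is not in the original snippet, but is usually also needed to keep an FBO without color attachments complete:
// Depth-only FBO: no color attachment, so disable draw and read buffers
// before checking completeness.
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fbo);
glFramebufferTextureARB(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT, tDepthCubeMap, 0);
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);
if (!isValidFBO()) {
    glDeleteFramebuffersEXT(1, &fbo);
    fbo = 0;
}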
To render to the faces of a cube map you need a geometry shader. The following shader is missing the per-face view rotations, but it should make the mechanism clear: gl_Layer is used to direct each primitive to the correct face (0 = +X, 1 = -X, ...). A sketch of the missing per-face matrices follows the shader.
#version 120
#extension GL_EXT_geometry_shader4 : enable
void main(void) {
    int i, layer;
    for (layer = 0; layer < 6; layer++) {
        gl_Layer = layer;
        for (i = 0; i < 3; i++) {
            gl_Position = gl_PositionIn[i];
            EmitVertex();
        }
        EndPrimitive();
    }
}
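The "missing rotations" are the six per-face view-projection matrices. Below is a C++ sketch (using GLM) of how the host side might build and upload them; u_faceVP, lightPos, zNear, zFar and program are placeholder names, and it assumes the vertex shader passes world-space positions so the geometry shader can do gl_Position = u_faceVP[layer] * gl_PositionIn[i]:
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <glm/gtc/type_ptr.hpp>

float zNear = 0.1f, zFar = 100.0f;   // clip planes for the light (placeholders)
glm::vec3 lightPos(0.0f);            // centre of the cube map (placeholder)

// 90 degree FOV, aspect 1.0: each face covers exactly one face of the cube.
glm::mat4 proj = glm::perspective(glm::radians(90.0f), 1.0f, zNear, zFar);

// Standard cube map face orientations: +X, -X, +Y, -Y, +Z, -Z.
const glm::vec3 dirs[6] = {
    { 1, 0, 0}, {-1, 0, 0}, {0, 1, 0}, {0, -1, 0}, {0, 0, 1}, {0, 0, -1}
};
const glm::vec3 ups[6] = {
    {0, -1, 0}, {0, -1, 0}, {0, 0, 1}, {0, 0, -1}, {0, -1, 0}, {0, -1, 0}
};

glm::mat4 faceVP[6];
for (int face = 0; face < 6; ++face)
    faceVP[face] = proj * glm::lookAt(lightPos, lightPos + dirs[face], ups[face]);

// Upload as a mat4[6] uniform array; 'program' is the linked program
// containing the geometry shader above.
GLint loc = glGetUniformLocation(program, "u_faceVP");
glUniformMatrix4fv(loc, 6, GL_FALSE, glm::value_ptr(faceVP[0]));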

Related

Calculating color histogram of framebuffer inside compute shader

As the title suggests, I am rendering a scene onto a framebuffer and I am trying to extract the color histogram from that framebuffer inside a compute shader. I am totally new to using compute shaders and the lack of tutorials/examples/keywords has overwhelmed me.
In particular, I am struggling to properly set up the input and output images of the compute shader. Here's what I have:
computeShaderProgram = loadComputeShader("test.computeshader");
int bin_size = 1;
int num_bins = 256 / bin_size;
tex_w = 1024;
tex_h = 768;
GLuint renderFBO, renderTexture;
GLuint tex_output;
//defining output image that will contain the histogram
glGenTextures(1, &tex_output);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, tex_output);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R16, num_bins, 3, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_INT, NULL);
glBindImageTexture(0, tex_output, 0, GL_FALSE, 0, GL_WRITE_ONLY, GL_R16UI);
//defining the framebuffer the scene will be rendered on
glGenFramebuffers(1, &renderFBO);
glGenTextures(1, &renderTexture);
glBindTexture(GL_TEXTURE_2D, renderTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, W_WIDTH, W_HEIGHT, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glBindFramebuffer(GL_FRAMEBUFFER, renderFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, renderTexture, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
In the main loop I draw a simple square onto the framebuffer and attempt to pass the framebuffer as input image to the compute shader:
glBindFramebuffer(GL_FRAMEBUFFER, renderFBO);
glDrawArrays(GL_TRIANGLES, 0, 6);
glUseProgram(computeShaderProgram);
//use as input the drawn framebuffer
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, renderFBO);
//use as output a pre-defined texture image
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, tex_output);
//run compute shader
glDispatchCompute((GLuint)tex_w, (GLuint)tex_h, 1);
GLuint *outBuffer = new GLuint[num_bins * 3];
glGetTexImage(GL_TEXTURE_2D, 0, GL_R16, GL_UNSIGNED_INT, outBuffer);
Finally, inside the compute shader I have:
#version 450
layout(local_size_x = 1, local_size_y = 1) in;
layout(rgba32f, binding = 0) uniform readonly image2D img_input;
layout(r16ui, binding = 1) uniform writeonly image2D img_output;
void main() {
    // grabbing pixel value from input image
    vec4 pixel_color = imageLoad(img_input, ivec2(gl_GlobalInvocationID.xy));
    vec3 rgb = round(pixel_color.rgb * 255);
    ivec2 r = ivec2(rgb.r, 0);
    ivec2 g = ivec2(rgb.g, 1);
    ivec2 b = ivec2(rgb.b, 2);
    imageAtomicAdd(img_output, r, 1);
    imageAtomicAdd(img_output, g, 1);
    imageAtomicAdd(img_output, b, 1);
}
I defined the output as a 2d texture image of size N x 3 where N is the number of bins and the 3 accounts for the individual color components. Inside the shader I grab a pixel value from the input image, scale it into the 0-255 range and increment the appropriate location in the histogram.
I cannot verify that this works as intended because the compute shader produces compilation errors, namely:
can't apply layout(r16ui) to image type "image2D"
unable to find compatible overloaded function "imageAtomicAdd(struct image2D1x16_bindless, ivec2, int)"
EDIT: after changing to r32ui the previous error now becomes: qualified actual parameter #1 cannot be converted to less qualified parameter ("im")
How can I properly configure my compute shader?
Is my process correct (at least in theory) and if not, why?
As for your questions:
can't apply layout(r16ui) to image type "image2D"
r16ui is an unsigned integer format, so it can only be applied to unsigned integer image types; you should use uimage2D instead of image2D.
unable to find compatible overloaded function ...
The spec explicitly says that atomic operations can only be applied to 32-bit types (r32i, r32ui, or r32f). Thus you must use a 32-bit texture instead.
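To make that concrete, here is a sketch of the histogram texture allocated as a 32-bit unsigned integer texture (replacing the GL_R16 allocation above) and bound to image unit 1, matching the binding = 1 in the shader:
// num_bins x 3 unsigned 32-bit histogram (one row per color channel).
glGenTextures(1, &tex_output);
glBindTexture(GL_TEXTURE_2D, tex_output);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
// Integer textures use GL_RED_INTEGER as the pixel transfer format.
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32UI, num_bins, 3, 0,
             GL_RED_INTEGER, GL_UNSIGNED_INT, NULL);
glBindImageTexture(1, tex_output, 0, GL_FALSE, 0, GL_READ_WRITE, GL_R32UI);
The readback then also needs an integer transfer format, i.e. glGetTexImage(GL_TEXTURE_2D, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, outBuffer); GL_R16 in the original call is an internal format, not a transfer format.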
You have other issues in your code too.
glBindTexture(GL_TEXTURE_2D, renderFBO);
You cannot bind an FBO as a texture; renderFBO is a framebuffer object, not a texture. You should instead bind the texture that backs the FBO (renderTexture).
Also, you intend to bind a texture to an image uniform rather than a sampler, thus you must use glBindImageTexture or glBindImageTextures rather than glBindTexture. With the latter you can bind both images in one call:
GLuint images[] = { renderTexture, tex_output };
glBindImageTextures(0, 2, images);
Your img_output uniform is marked as writeonly. However the atomic image functions expect an unqualified uniform. So remove the writeonly.
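Putting the three fixes together (and, as an additional adjustment not covered above, matching the input image's format qualifier to the RGBA8 renderTexture), the corrected contents of test.computeshader might look like this, shown here as a C++ raw string:
const char* histogramCS = R"GLSL(
#version 450
layout(local_size_x = 1, local_size_y = 1) in;
// rgba8 matches the GL_RGBA (i.e. RGBA8) renderTexture bound to image unit 0.
layout(rgba8, binding = 0) uniform readonly image2D img_input;
// 32-bit unsigned integer image; no writeonly, so atomics are allowed.
layout(r32ui, binding = 1) uniform uimage2D img_output;
void main() {
    vec4 pixel_color = imageLoad(img_input, ivec2(gl_GlobalInvocationID.xy));
    ivec3 rgb = ivec3(round(pixel_color.rgb * 255.0));
    imageAtomicAdd(img_output, ivec2(rgb.r, 0), 1u);
    imageAtomicAdd(img_output, ivec2(rgb.g, 1), 1u);
    imageAtomicAdd(img_output, ivec2(rgb.b, 2), 1u);
}
)GLSL";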
You can find all the above information in the OpenGL and GLSL specs, freely accessible from the OpenGL registry.

OpenGL Depth Texture is the same as Color Texture

I'm trying to visualize the depth texture attached to a custom FBO in OpenGL. I expected to see a "foggy"-looking, greyscale output - but the output seems to be the same as the color texture - I mean it's the exact same color texture. Am I actually rendering the same thing into 2 different textures?
Creating the depth texture:
glGenTextures(1, &m_DepthTxId);
glBindTexture(GL_TEXTURE_2D, m_DepthTxId);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, width, height, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, (void*)nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
Creating the fbo & attaching the color/depth textures:
glGenFramebuffers(1, &m_Id);
glBindFramebuffer(GL_FRAMEBUFFER, m_Id);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, m_ColorTxId, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, m_DepthTxId, 0);
m_DrawBuffers[0] = GL_COLOR_ATTACHMENT0;
glDrawBuffers(1, m_DrawBuffers);
The interesting thing is that without adding the depth texture, the objects are displayed in the order they were rendered (scene looks messed up) - but when I add the depth texture, everything looks fine as you would expect with depth testing.
But how come when I give the depth texture to the shader it just displays the color-texture? Am I rendering color data into my depth texture, or..?
Apologies, but I'm fairly new to working with fbos :P
Additional info:
Rendering the fbo-textures onto a quad:
glActiveTexture(GL_TEXTURE0); // Color
glBindTexture(GL_TEXTURE_2D, m_Fbo->getColorTxId()); // ColorTexture ID
glUniform1i(m_Shader->getColorSampler(), 0); // ColorSampler Location
glActiveTexture(GL_TEXTURE1); // Depth
glBindTexture(GL_TEXTURE_2D, m_Fbo->getDepthTxId()); // DepthTexture ID
glUniform1i(m_Shader->getDepthSampler(), 0); // DepthSampler Location
glBindVertexArray(m_Fbo->getVaoId());
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_SHORT, (void*)0);
glBindVertexArray(0);
Getting the sampler locations from the shader:
m_Location_ColorTxSampler = glGetUniformLocation(m_ProgramId, "colorSampler");
m_Location_DepthTxSampler = glGetUniformLocation(m_ProgramId, "depthSampler");
Shader:
in vec2 uv;
uniform sampler2D colorSampler;
uniform sampler2D depthSampler;
out vec4 color;
void main()
{
color = vec4(texture2D(depthSampler, uv).rgb, 1.0);
}
To me the whole thing seems correct, unless the two samplers end up at the same location... but I'm sure that's not possible.
If you bind the depth texture to unit 1...
glActiveTexture(GL_TEXTURE1); // Depth
glBindTexture(GL_TEXTURE_2D, m_Fbo->getDepthTxId()); // DepthTexture ID
... you should not tell the shader to sample from unit 0:
glUniform1i(m_Shader->getDepthSampler(), 0); // DepthSampler Location
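In other words, the sampler uniform must be set to the unit its texture is actually bound to:
glActiveTexture(GL_TEXTURE1);                         // Depth
glBindTexture(GL_TEXTURE_2D, m_Fbo->getDepthTxId());  // DepthTexture ID
glUniform1i(m_Shader->getDepthSampler(), 1);          // unit 1, matching GL_TEXTURE1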

Why do I get an incomplete attachment when setting up an OpenGL FBO for rendering to a cubemap depth texture?

I am attempting to render a scene's depth to a cubemap texture, but upon checking the framebuffer status it returns GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT, and of course no rendering occurs. Obviously I am missing something, but I cannot seem to figure out what; I was hoping somebody here could help me with that. My video card is an NVIDIA GeForce 840M that supports OpenGL 4.5. Below is how I set up the rendering to the FBO.
UPDATE: Attach all cube map face textures before the render loop.
GLuint texhandle;
glGenTextures(1, &texhandle);
glBindTexture(GL_TEXTURE_CUBE_MAP, texhandle);
for (int i = 0; i < 6; i++) {
    glTexImage2D(
        GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0,
        GL_DEPTH_COMPONENT24, 1600, 900,
        0, GL_DEPTH_COMPONENT, GL_FLOAT, 0
    );
}
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
GLuint fbohandle;
glGenFramebuffers(num, &fbohandle);
glBindFramebuffer(GL_FRAMEBUFFER, fbohandle);
glCullFace(GL_BACK);
for (int i = 0; i < 6; i++) {
    glFramebufferTexture2D(
        GL_FRAMEBUFFER,
        GL_DEPTH_ATTACHMENT,
        GL_TEXTURE_CUBE_MAP_POSITIVE_X + i,
        fbohandle,
        0
    );
}
for (int i = 0; i < 6; i++) {
    GLenum cubemapSide = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, cubemapSide, fbohandle, 0);
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    glDrawBuffer(GL_NONE);
    CheckFramebufferStatus();
    RenderScene();
}
Let's handle the easy mistake first:
glFramebufferTexture2D(..., fbohandle, ...);
This is wrong, you should of course use texhandle to attach the specific texture object (to the currently bound GL_FRAMEBUFFER, which will be fbohandle at that time).
However, you seem to have misconceptions about how layered framebuffers work. There is only one GL_DEPTH_ATTACHMENT per FBO, hence you can only attach one texture object (or renderbuffer) to it.
What your code would conceptually do (after fixing the mistake above) is bind each face of the cube map, one after the other, as the single, unlayered depth attachment, so after the loop only the last cube face is attached and the loop itself is completely pointless.
To use layered rendering, you need to
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, texobj, 0);
which, in the case of a cube map, creates a depth attachment with 6 layers referring to the faces of the cube map, and more specifically to mipmap level 0 of each of them.
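Putting both fixes together, here is a minimal sketch of the layered, depth-only FBO setup (texture creation as in the question; it assumes the scene is rendered with a geometry shader that selects the target face via gl_Layer):
GLuint fbohandle;
glGenFramebuffers(1, &fbohandle);
glBindFramebuffer(GL_FRAMEBUFFER, fbohandle);

// Attach the whole cube map (all 6 faces as layers) in a single call.
glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, texhandle, 0);

// No color attachment, so disable the draw/read buffers before the check.
glDrawBuffer(GL_NONE);
glReadBuffer(GL_NONE);

if (glCheckFramebufferStatus(GL_FRAMEBUFFER) != GL_FRAMEBUFFER_COMPLETE) {
    // handle the error
}

// One clear and one draw cover all 6 layers.
glClear(GL_DEPTH_BUFFER_BIT);
RenderScene();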

OpenGL GLSL can't mix 2D textures and 3D textures

[Edit2]: Nothing wrong with this code. My shader class didn't load the uniforms correctly.
[Edit]: It seems like I can only use GL_TEXTURE0 / texture unit 0 for some reason.
What I want is to draw with both a 2D texture and a 3D texture, but only the texture bound to texture unit 0 (GL_TEXTURE0) works. If I use both of them at the same time in the shader, I can't see anything drawn with that shader.
This is the fragment shader code I want to use:
#version 330 core
// Interpolated values from the vertex shaders
in vec3 fragmentColor;
in vec3 fragmentPosition;
// Output data
out vec3 color;
uniform sampler3D textureSampler3D;
uniform sampler2D textureSampler2D;
float getInputLight(vec3 pos);
void main(){
    // Get the nearest corner
    vec3 cornerPosition = vec3(round(fragmentPosition.x), round(fragmentPosition.y), round(fragmentPosition.z));
    float light = getInputLight(cornerPosition);
    color = (0.5+16*light)*fragmentColor;
}

float getInputLight(vec3 pos) {
    if (pos.z <= 0.f)
        return texture2D(textureSampler2D, vec2(pos.x/16, pos.y/16)).r;
    return texture(textureSampler3D, vec3(pos.x/16, pos.y/16, pos.z/16)).r;
}
But with that I can't see anything drawn by that shader. If I use this instead, I can see what the 2D texture does:
float getInputLight(vec3 pos) {
    if (pos.z <= 0.f)
        return texture(textureSampler2D, vec2(pos.x/16, pos.y/16)).r;
    return 0.f;
}
If I use this it works perfectly, except that I only use the 3D texture:
float getInputLight(vec3 pos) {
    return texture(textureSampler3D, pos/16).r;
}
That means I can only use one of the textures in the shader at a time. When I say I "use the 3D texture", I mean I change getInputLight so that it only reads the 3D texture; I do the same for the 2D texture by switching to the other version.
This is the C++ code I use to load the 3D texture:
glGenTextures(1, &m_3dTextureBuffer);
glBindTexture(GL_TEXTURE_3D, m_3dTextureBuffer);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage3D(GL_TEXTURE_3D, 0, GL_R8, voxelMatrixWidth, voxelMatrixHeight, voxelMatrixDepth, 0, GL_RED, GL_UNSIGNED_BYTE, (GLvoid*)m_lightData);
This is the code I use to load the 2D texture:
GLuint buffer;
unsigned char *voidData = new unsigned char[256];
// With this I can see if the shader has the right data.
for (int i = 0; i < 256; ++i)
    voidData[i] = i % 16;
glGenTextures(1, &buffer);
glBindTexture(GL_TEXTURE_2D, buffer);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 16, 16, 0, GL_RED, GL_UNSIGNED_BYTE, (GLvoid*)voidData);
m_3dTextureBuffer = buffer;
This is the code I run before it draws the vertex buffer:
GLint texture3dId = shader->getUniform(1);
GLint texture2dId = shader->getUniform(2);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, m_3dTextureBuffer);
glUniform1i(texture3dId, 0);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, m_2dTextureBuffer);
//glUniform1i(texture3dId,1);
If I use texture unit 0 (GL_TEXTURE0) for both the 2D texture and the 3D texture, I get the data/pixels I expect.
This is a picture of it and it is what I expect:
http://oi57.tinypic.com/3wrbd.jpg
If I use different units I get random data, and it sometimes flashes (every pixel turns black/0 for a frame). The random data doesn't change either. Looking in some directions it doesn't flash at all, and some directions flash faster than others.
http://oi58.tinypic.com/2ltiqoo.jpg
When I swap the texture units of the 3D texture and the 2D texture the same thing happens, but then the 2D texture works and the 3D texture fails.
Do you have any idea what it could be?
There was nothing wrong with this code. My shader class didn't load the uniforms correctly.
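For anyone hitting the same symptom, "loading the uniforms correctly" usually boils down to querying each sampler by name and setting it to the unit its texture is bound to. A minimal sketch using the names from the question (programId stands for the linked shader program):
GLint loc3d = glGetUniformLocation(programId, "textureSampler3D");
GLint loc2d = glGetUniformLocation(programId, "textureSampler2D");

glUseProgram(programId);

glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D, m_3dTextureBuffer);
glUniform1i(loc3d, 0);   // sampler3D reads from unit 0

glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, m_2dTextureBuffer);
glUniform1i(loc2d, 1);   // sampler2D reads from unit 1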

Unable to render depth texture

I'm having trouble rendering a depth texture using a framebuffer object in OpenGL, and I cannot find the problem myself.
Here is the setup:
//initialize color texture
glGenTextures(1, &color_buffer);
glBindTexture(GL_TEXTURE_2D, color_buffer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, width, height, 0, GL_RGBA, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glBindTexture(GL_TEXTURE_2D, 0);
//initialize depth texture
glGenTextures(1, &depth_buffer);
glBindTexture(GL_TEXTURE_2D, depth_buffer);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, width, height, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
//bind both textures to a frame buffer
glGenFramebuffers(1, &frame_buffer);
glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_buffer, 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_buffer, 0);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
After rendering the scene to the framebuffer, I use the following code to display the texture. When color_buffer is used, the scene is drawn correctly, but when I use depth_buffer, the screen is all white. I'm not sure what is wrong here. My fragment shader just uses gl_FragColor = texture2D(texture_ID, texture_coord); to render. What is wrong with my code? How can I render a depth texture?
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, color_buffer); // or depth_buffer
glUseProgramObjectARB( program );
glUniform1i(glGetUniformLocation(program,"img"),0);
//codes that attach the texture to a quad
But when I use depth_buffer, the screen is all white.
My fragment shader just use gl_FragColor = texture2D(texture_ID,texture_coord); to render.
That white image is your depth buffer. It's not possible to be sure without knowing what you actually rendered (or which projection matrix you use), but generally speaking, a perspective projection is a very skewed transform: it pushes Z towards the far end of the [0, 1] range, so most depth values end up much closer to 1 than to 0.
If you want depth values in some kind of linear space, then you'll need to linearize them. That's going to be somewhat difficult without the clip-space W to multiply with. It's doable, but you'll need values from your perspective matrix to do it.
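Here is a sketch of that linearization, assuming the standard perspective projection, the default [0, 1] depth range, and that you know the zNear/zFar values used to build the projection matrix (the same math works directly in the fragment shader):
// Convert a depth-texture sample d in [0, 1] back to a linear eye-space distance.
float linearizeDepth(float d, float zNear, float zFar)
{
    float ndcZ = d * 2.0f - 1.0f;                  // [0, 1] -> [-1, 1]
    return (2.0f * zNear * zFar) /
           (zFar + zNear - ndcZ * (zFar - zNear)); // distance in [zNear, zFar]
}

// For display, remap the result to [0, 1] before writing it out as a grey value:
// grey = (linearizeDepth(d, zNear, zFar) - zNear) / (zFar - zNear);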