I'm trying to generate a cube map array and store depth values, etc. glCheckFramebufferStatus confirms that the framebuffer is complete, but when I actually render to it, nothing gets stored.
The simplified code is shown below.
// Initialize
glGenTextures(1, &CubeMap);
glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, CubeMap);
for (int j = 0; j < 6; j++) {
    glTexImage3D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, 0, GL_RGBA16F, 512, 512, ArrayNum, 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
}
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_CUBE_MAP_ARRAY, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);
glGenRenderbuffers(1, &CubeMapRBO);
glBindRenderbuffer(GL_RENDERBUFFER, CubeMapRBO);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16, 512, 512);
glBindRenderbuffer(GL_RENDERBUFFER, 0);
// Render to CubeMap Array
for (int i = 0; i < ArrayNum; i++) {
    for (int j = 0; j < 6; j++) {
        glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_CUBE_MAP_POSITIVE_X + j, CubeMap, i);
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, CubeMapRBO);
    }
}
For a 2D texture array it seems you are supposed to use glFramebufferTextureLayer instead of glFramebufferTexture2D, but when I applied that to the cube map array, an error was returned.
If anyone knows anything, please comment.
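For reference, a cube map array is normally allocated with a single glTexImage3D call on the GL_TEXTURE_CUBE_MAP_ARRAY target, with a depth of 6 * ArrayNum layer-faces, and individual layer-faces are then attached with glFramebufferTextureLayer. A minimal sketch of that path (CubeMap, CubeMapRBO and ArrayNum are taken from the question; CubeMapFBO is an assumed framebuffer object):

// Allocate the whole cube map array at once: depth = 6 faces * ArrayNum cube maps.
glBindTexture(GL_TEXTURE_CUBE_MAP_ARRAY, CubeMap);
glTexImage3D(GL_TEXTURE_CUBE_MAP_ARRAY, 0, GL_RGBA16F, 512, 512, 6 * ArrayNum, 0, GL_RGBA, GL_FLOAT, nullptr);

glBindFramebuffer(GL_FRAMEBUFFER, CubeMapFBO);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, CubeMapRBO);
for (int i = 0; i < ArrayNum; i++) {
    for (int j = 0; j < 6; j++) {
        // A layer-face of a cube map array is addressed as cubeMapIndex * 6 + faceIndex.
        glFramebufferTextureLayer(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, CubeMap, 0, i * 6 + j);
        // ... render face j of cube map i here ...
    }
}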
I have rendered a depth map to a framebuffer in the following way:
// the framebuffer
glGenFramebuffers(1, &depthMapFBO);
// completing the framebuffer: attaching a depth texture
glGenTextures(1, &depthMap);
glBindTexture(GL_TEXTURE_2D, depthMap);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT32F, SCR_WIDTH, SCR_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL); // i.e. allocate memory, to be filled later at rendering
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
// bind framebuffer and attach depth texture
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
glDrawBuffer(GL_NONE); // i.e. explicitly tell we want no color data
glReadBuffer(GL_NONE); // i.e. explicitly tell we want no color data
glBindFramebuffer(GL_FRAMEBUFFER, 0);
Note the use of GL_DEPTH_COMPONENT32F because I want high precision.
Now I want to read the values stored in the framebuffer's depth buffer into an array, preserving that precision. How can I do that? Here is what I had in mind:
glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
glClear(GL_DEPTH_BUFFER_BIT);
// [render the scene to the framebuffer]
GLfloat* d = new GLfloat[conf::SCR_WIDTH * conf::SCR_HEIGHT];
glReadPixels(0, 0, conf::SCR_WIDTH, conf::SCR_HEIGHT, GL_DEPTH_COMPONENT, GL_FLOAT, d);
for (int i{ 0 }; i < conf::SCR_HEIGHT; ++i) {
    for (int j{ 0 }; j < conf::SCR_WIDTH; ++j) {
        std::cout << d[i * conf::SCR_WIDTH + j] << " ";
    }
    std::cout << std::endl;
}
However, this always prints 0.956376. Why? I know I still have to re-linearize the depths... but why is a constant value always printed, and how can I fix this? Furthermore, is my approach correct with regard to lossless retrieval of the information? Thanks in advance.
The same thing happens with:
GLfloat* d = new GLfloat[conf::SCR_WIDTH * conf::SCR_HEIGHT * 4];
glBindTexture(GL_TEXTURE_2D, depthMap);
glGetTexImage(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, GL_FLOAT, d);
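For reference, glReadPixels with GL_DEPTH_COMPONENT reads from whichever framebuffer is currently bound for reading, so the depth FBO has to be the read framebuffer at that point. A minimal readback sketch with an error check added (depthMapFBO and the conf:: constants are taken from the question):

// Make sure the depth FBO is the current read framebuffer; reading while the
// default framebuffer is bound returns the default depth buffer instead.
glBindFramebuffer(GL_READ_FRAMEBUFFER, depthMapFBO);

GLfloat* depths = new GLfloat[conf::SCR_WIDTH * conf::SCR_HEIGHT];
glReadPixels(0, 0, conf::SCR_WIDTH, conf::SCR_HEIGHT, GL_DEPTH_COMPONENT, GL_FLOAT, depths);

// GL_INVALID_OPERATION here usually means the read framebuffer is incomplete
// or has no depth attachment.
GLenum err = glGetError();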
I am trying to render to an unsigned-integer 2D texture (RGBA32UI) and then read the pixel data back using glReadPixels, but it does not work.
GLuint FramebufferName = 0;
glGenFramebuffers(1, &FramebufferName);
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
// The texture we're going to render to
GLuint renderedTexture;
glGenTextures(1, &renderedTexture);
glBindTexture(GL_TEXTURE_2D, renderedTexture);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB32UI, windowWidth, windowHeight, 0, GL_RGB_INTEGER, GL_UNSIGNED_INT, 0);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, renderedTexture, 0);
// The depth buffer
GLuint depthrenderbuffer;
glGenRenderbuffers(1, &depthrenderbuffer);
glBindRenderbuffer(GL_RENDERBUFFER, depthrenderbuffer);
glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, windowWidth, windowHeight);
glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, depthrenderbuffer);
GLenum DrawBuffers[1] = { GL_COLOR_ATTACHMENT0 };
glDrawBuffers(1, DrawBuffers); // "1" is the size of DrawBuffers
glBindFramebuffer(GL_FRAMEBUFFER, FramebufferName);
render_scene();
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glBindBuffer(GL_READ_BUFFER, FramebufferName);
glReadBuffer(GL_COLOR_ATTACHMENT0);
GLuint data[3];
for (int i = 0; i < windowWidth; i++)
    for (int j = 0; j < windowHeight; j++)
    {
        glReadPixels(i, j, 1, 1, GL_RGB32UI, GL_UNSIGNED_INT, data);
        if (data[0] == 1 && data[1] == 2 && data[2] == 3)
            int a = 1;
    }
Shader (GLSL)
#version 450 core
out uvec3 output_color;
void main()
{
    output_color = uvec3(1, 2, 3);
}
Has anyone else experienced this problem? Please help me.
The command
glBindBuffer(GL_READ_BUFFER, FramebufferName);
is wrong. glBindBuffer does not accept the target GL_READ_BUFFER, so this should result in a GL_INVALID_ENUM error. (You should really add some error checking, preferably via debug output if available.) You can never bind an FBO with glBindBuffer; FBOs can only be bound with glBindFramebuffer. What you actually wanted to do is:
glBindFramebuffer(GL_READ_FRAMEBUFFER, FramebufferName);
glReadBuffer(GL_COLOR_ATTACHMENT0);
Also note that glBindFramebuffer(GL_FRAMEBUFFER, ...); is just a shortcut to bind GL_DRAW_FRAMEBUFFER and GL_READ_FRAMEBUFFER at once.
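A corrected read path might then look like this (a sketch; i and j are the loop indices from the question; note that the glReadPixels format for an integer texture must be GL_RGB_INTEGER, since GL_RGB32UI is only valid as an internal format for glTexImage2D):

glBindFramebuffer(GL_READ_FRAMEBUFFER, FramebufferName);
glReadBuffer(GL_COLOR_ATTACHMENT0);

GLuint data[3];
// Integer textures require an *_INTEGER pixel transfer format.
glReadPixels(i, j, 1, 1, GL_RGB_INTEGER, GL_UNSIGNED_INT, data);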
I have read this tutorial: Array Texture, but I don't want to use glTexStorage3D() (it requires OpenGL 4.2). First of all, can someone check whether I have implemented this code properly (I'm using glTexImage3D instead of glTexStorage3D):
unsigned int nrTextures = 6;
GLsizei width = 256;
GLsizei height = 256;
GLuint arrayTextureID;
std::vector<unsigned char*> textures(nrTextures);
//textures: Load textures here...
glGenTextures(1, &arrayTextureID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D_ARRAY, arrayTextureID);
//Gamma to linear color space for each texture.
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB, width, height, nrTextures, 0, GL_RGB, GL_UNSIGNED_BYTE, NULL);
for (unsigned int i = 0; i < nrTextures; i++)
    glTexSubImage3D(GL_TEXTURE_2D_ARRAY, 0, 0, 0, i, width, height, 1, GL_RGB, GL_UNSIGNED_BYTE, textures[i]);
/*glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);*/
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
/*glGenerateMipmap(GL_TEXTURE_2D_ARRAY);*/
glBindTexture(GL_TEXTURE_2D_ARRAY, 0);
How do I implement mipmapping with this implementation?
This mostly looks ok. The only problem I can see is that GL_SRGB is not a valid internal texture format. So the glTexImage3D() call needs to use GL_SRGB8 instead:
glTexImage3D(GL_TEXTURE_2D_ARRAY, 0, GL_SRGB8, width, height, nrTextures, 0,
GL_RGB, GL_UNSIGNED_BYTE, NULL);
For generating mipmaps, you can call:
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
after you have filled the texture with data via the glTexSubImage3D() calls.
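For completeness, that is essentially the pair of lines commented out in the question; once mipmaps are generated, the minification filter also needs a mipmapped mode, otherwise the extra levels are never sampled:

// After all layers have been uploaded with glTexSubImage3D():
glGenerateMipmap(GL_TEXTURE_2D_ARRAY);
glTexParameteri(GL_TEXTURE_2D_ARRAY, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);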
I have four 3D textures and I write to them with imageStore in a single pass; everything works fine. The only problem is how to clear them each frame. This is how I create them:
for (int i = 0; i < MIPLEVELS; ++i){
    glGenTextures(1, &volumeTexture[i]);
    glBindTexture(GL_TEXTURE_3D, volumeTexture[i]);
    glTexImage3D(GL_TEXTURE_3D, 0, GL_RGBA8, volumeDimensions / (1 << i), volumeDimensions / (1 << i), volumeDimensions / (1 << i), 0, GL_RGBA, GL_UNSIGNED_BYTE, 0);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
    glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER);
    glBindTexture(GL_TEXTURE_3D, 0);

    glGenFramebuffers(1, &volumeFbo[i]);
    glBindFramebuffer(GL_FRAMEBUFFER, volumeFbo[i]);
    glFramebufferTexture(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, volumeTexture[i], 0);
    glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
for (int i = 0; i < MIPLEVELS; ++i){
    glBindFramebuffer(GL_FRAMEBUFFER, volumeFbo[i]);
    glClearColor(0, 0, 0, 0);
    glClear(GL_COLOR_BUFFER_BIT);
}
and this is what I do to clear them each frame:
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
for (int i = 0; i < MIPLEVELS; ++i){
    glBindFramebuffer(GL_FRAMEBUFFER, volumeFbo[i]);
    glClearColor(0, 0, 0, 0);
    glClear(GL_COLOR_BUFFER_BIT);
}
glBindFramebuffer(GL_FRAMEBUFFER, 0);
As you can see, I am using 4 different FBOs, which is a big waste. I tried using 4 render targets instead, but they don't get cleared. I did it this way:
I bound a single FBO, attached 4 targets using glFramebufferTexture3D, and then each frame I called
glBindFramebuffer(GL_FRAMEBUFFER, volumeFbo[0]);
GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2, GL_COLOR_ATTACHMENT3 };
glDrawBuffers(4, buffers);
glClearColor(0, 0, 0, 0);
glClear(GL_COLOR_BUFFER_BIT);
But nothing happened.
I believe there are two ways of solving this issue.
You can either loop over the textures, attaching each one as GL_COLOR_ATTACHMENT0 and clearing it in turn, or you can use the glClearBuffer family of functions:
https://www.opengl.org/sdk/docs/man3/xhtml/glClearBuffer.xml
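A minimal sketch of the glClearBuffer approach, assuming the four volume textures are attached to a single FBO as GL_COLOR_ATTACHMENT0 through GL_COLOR_ATTACHMENT3 as described in the question:

glBindFramebuffer(GL_FRAMEBUFFER, volumeFbo[0]);
GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1, GL_COLOR_ATTACHMENT2, GL_COLOR_ATTACHMENT3 };
glDrawBuffers(4, buffers);

GLfloat zero[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
// glClearBufferfv clears one draw buffer at a time; the index refers to the slot set by glDrawBuffers.
for (int i = 0; i < 4; ++i)
    glClearBufferfv(GL_COLOR, i, zero);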
I am trying to create a normal map in OpenGL that I can load into the shader and change dynamically, though currently I am stuck on how to create the texture.
I currently have this:
glActiveTexture(GL_TEXTURE7);
glGenTextures(1, &normals);
glBindTexture(GL_TEXTURE_2D, normals);
texels = new Vector3f*[256];
for (int i = 0; i < 256; ++i){
    texels[i] = new Vector3f[256];
}
this->setup_normals();
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexImage2D(GL_TEXTURE_2D, 0, 3, 256, 256, 0, GL_RGB, GL_FLOAT, texels);
...
void setup_normals(){
    for (int i = 0; i < 256; ++i){
        for (int j = 0; j < 256; ++j){
            texels[i][j][0] = 0.0f;
            texels[i][j][1] = 1.0f;
            texels[i][j][2] = 0.0f;
        }
    }
}
where Vector3f is: typedef float Vector3f[3];
and texels is: Vector3f** texels;
When I draw this texture to a screen quad using an orthographic matrix (which works for textures loaded from file), the result is not the solid green I expect (screenshot omitted).
I am unsure why it does not appear fully green and also what is causing the black streaks that appear within it. Any help appreciated.
Your array needs to be contiguous since glTexImage2D() doesn't take any sort of stride or row mapping parameters:
texels = new Vector3f[256*256];
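A minimal sketch of the contiguous version (the Vector3f typedef and the solid-green fill follow the question; GL_RGB is used here in place of the legacy internal format 3):

// One contiguous, row-major block of 256*256 RGB float texels.
Vector3f* texels = new Vector3f[256 * 256];
for (int i = 0; i < 256; ++i) {
    for (int j = 0; j < 256; ++j) {
        texels[i * 256 + j][0] = 0.0f;
        texels[i * 256 + j][1] = 1.0f;   // solid green
        texels[i * 256 + j][2] = 0.0f;
    }
}
// The pointer now refers to tightly packed float RGB data, which glTexImage2D expects.
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, 256, 256, 0, GL_RGB, GL_FLOAT, texels);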